author    Linus Torvalds <torvalds@linux-foundation.org>  2011-09-18 14:02:26 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-09-18 14:02:26 -0400
commit    b0e7031ac08fa0aa242531c8d9a0cf9ae8ee276d (patch)
tree      86ae983c51b9df07ead6f00aeddbf276f672c839 /net
parent    01a7143586f51f80e1b29ebf240c6e5390657450 (diff)
parent    8e2ec639173f325977818c45011ee176ef2b11f6 (diff)
Merge git://github.com/davem330/net
* git://github.com/davem330/net: (62 commits)
  ipv6: don't use inetpeer to store metrics for routes.
  can: ti_hecc: include linux/io.h
  IRDA: Fix global type conflicts in net/irda/irsysctl.c v2
  net: Handle different key sizes between address families in flow cache
  net: Align AF-specific flowi structs to long
  ipv4: Fix fib_info->fib_metrics leak
  caif: fix a potential NULL dereference
  sctp: deal with multiple COOKIE_ECHO chunks
  ibmveth: Fix checksum offload failure handling
  ibmveth: Checksum offload is always disabled
  ibmveth: Fix issue with DMA mapping failure
  ibmveth: Fix DMA unmap error
  pch_gbe: support ML7831 IOH
  pch_gbe: added the process of FIFO over run error
  pch_gbe: fixed the issue which receives an unnecessary packet.
  sfc: Use 64-bit writes for TX push where possible
  Revert "sfc: Use write-combining to reduce TX latency" and follow-ups
  bnx2x: Fix ethtool advertisement
  bnx2x: Fix 578xx link LED
  bnx2x: Fix XMAC loopback test
  ...
Diffstat (limited to 'net')
-rw-r--r--  net/bridge/netfilter/Kconfig | 2
-rw-r--r--  net/caif/caif_dev.c | 6
-rw-r--r--  net/can/af_can.c | 2
-rw-r--r--  net/core/dev.c | 8
-rw-r--r--  net/core/flow.c | 36
-rw-r--r--  net/core/skbuff.c | 22
-rw-r--r--  net/ethernet/eth.c | 2
-rw-r--r--  net/ipv4/af_inet.c | 7
-rw-r--r--  net/ipv4/fib_semantics.c | 10
-rw-r--r--  net/ipv4/netfilter/ip_queue.c | 12
-rw-r--r--  net/ipv4/proc.c | 2
-rw-r--r--  net/ipv4/tcp_ipv4.c | 49
-rw-r--r--  net/ipv6/datagram.c | 5
-rw-r--r--  net/ipv6/ip6_flowlabel.c | 8
-rw-r--r--  net/ipv6/ipv6_sockglue.c | 2
-rw-r--r--  net/ipv6/netfilter/ip6_queue.c | 12
-rw-r--r--  net/ipv6/raw.c | 4
-rw-r--r--  net/ipv6/route.c | 33
-rw-r--r--  net/ipv6/tcp_ipv6.c | 31
-rw-r--r--  net/ipv6/udp.c | 4
-rw-r--r--  net/irda/irsysctl.c | 6
-rw-r--r--  net/irda/qos.c | 6
-rw-r--r--  net/mac80211/sta_info.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_pptp.c | 1
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c | 6
-rw-r--r--  net/netfilter/nfnetlink_queue.c | 4
-rw-r--r--  net/netfilter/xt_rateest.c | 9
-rw-r--r--  net/sched/cls_rsvp.h | 27
-rw-r--r--  net/sctp/sm_sideeffect.c | 5
-rw-r--r--  net/sctp/sm_statefuns.c | 6
30 files changed, 187 insertions, 142 deletions
diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig
index ba6f73eb06c6..a9aff9c7d027 100644
--- a/net/bridge/netfilter/Kconfig
+++ b/net/bridge/netfilter/Kconfig
@@ -4,7 +4,7 @@
 
 menuconfig BRIDGE_NF_EBTABLES
 	tristate "Ethernet Bridge tables (ebtables) support"
-	depends on BRIDGE && BRIDGE_NETFILTER
+	depends on BRIDGE && NETFILTER
 	select NETFILTER_XTABLES
 	help
 	  ebtables is a general, extensible frame/packet identification
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 7c2fa0a08148..7f9ac0742d19 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -93,10 +93,14 @@ static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
 	caifdevs = caif_device_list(dev_net(dev));
 	BUG_ON(!caifdevs);
 
-	caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC);
+	caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
 	if (!caifd)
 		return NULL;
 	caifd->pcpu_refcnt = alloc_percpu(int);
+	if (!caifd->pcpu_refcnt) {
+		kfree(caifd);
+		return NULL;
+	}
 	caifd->netdev = dev;
 	dev_hold(dev);
 	return caifd;
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 8ce926d3b2cb..9b0c32a2690c 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -857,7 +857,7 @@ static __exit void can_exit(void)
 	struct net_device *dev;
 
 	if (stats_timer)
-		del_timer(&can_stattimer);
+		del_timer_sync(&can_stattimer);
 
 	can_remove_proc();
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 17d67b579beb..b10ff0a71855 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1515,6 +1515,14 @@ static inline bool is_skb_forwardable(struct net_device *dev,
  */
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 {
+	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
+		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
+			atomic_long_inc(&dev->rx_dropped);
+			kfree_skb(skb);
+			return NET_RX_DROP;
+		}
+	}
+
 	skb_orphan(skb);
 	nf_reset(skb);
 
diff --git a/net/core/flow.c b/net/core/flow.c
index bf32c33cad3b..555a456efb07 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -30,6 +30,7 @@ struct flow_cache_entry {
 		struct hlist_node	hlist;
 		struct list_head	gc_list;
 	} u;
+	struct net			*net;
 	u16				family;
 	u8				dir;
 	u32				genid;
@@ -172,29 +173,26 @@ static void flow_new_hash_rnd(struct flow_cache *fc,
 
 static u32 flow_hash_code(struct flow_cache *fc,
 			  struct flow_cache_percpu *fcp,
-			  const struct flowi *key)
+			  const struct flowi *key,
+			  size_t keysize)
 {
 	const u32 *k = (const u32 *) key;
+	const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32);
 
-	return jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd)
+	return jhash2(k, length, fcp->hash_rnd)
 		& (flow_cache_hash_size(fc) - 1);
 }
 
-typedef unsigned long flow_compare_t;
-
 /* I hear what you're saying, use memcmp. But memcmp cannot make
- * important assumptions that we can here, such as alignment and
- * constant size.
+ * important assumptions that we can here, such as alignment.
  */
-static int flow_key_compare(const struct flowi *key1, const struct flowi *key2)
+static int flow_key_compare(const struct flowi *key1, const struct flowi *key2,
+			    size_t keysize)
 {
 	const flow_compare_t *k1, *k1_lim, *k2;
-	const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t);
-
-	BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t));
 
 	k1 = (const flow_compare_t *) key1;
-	k1_lim = k1 + n_elem;
+	k1_lim = k1 + keysize;
 
 	k2 = (const flow_compare_t *) key2;
 
@@ -215,6 +213,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
 	struct flow_cache_entry *fle, *tfle;
 	struct hlist_node *entry;
 	struct flow_cache_object *flo;
+	size_t keysize;
 	unsigned int hash;
 
 	local_bh_disable();
@@ -222,6 +221,11 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
 
 	fle = NULL;
 	flo = NULL;
+
+	keysize = flow_key_size(family);
+	if (!keysize)
+		goto nocache;
+
 	/* Packet really early in init? Making flow_cache_init a
 	 * pre-smp initcall would solve this.  --RR */
 	if (!fcp->hash_table)
@@ -230,11 +234,12 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
 	if (fcp->hash_rnd_recalc)
 		flow_new_hash_rnd(fc, fcp);
 
-	hash = flow_hash_code(fc, fcp, key);
+	hash = flow_hash_code(fc, fcp, key, keysize);
 	hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) {
-		if (tfle->family == family &&
+		if (tfle->net == net &&
+		    tfle->family == family &&
 		    tfle->dir == dir &&
-		    flow_key_compare(key, &tfle->key) == 0) {
+		    flow_key_compare(key, &tfle->key, keysize) == 0) {
 			fle = tfle;
 			break;
 		}
@@ -246,9 +251,10 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
 
 		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
 		if (fle) {
+			fle->net = net;
 			fle->family = family;
 			fle->dir = dir;
-			memcpy(&fle->key, key, sizeof(*key));
+			memcpy(&fle->key, key, keysize * sizeof(flow_compare_t));
 			fle->object = NULL;
 			hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
 			fcp->hash_count++;
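
Aside: the flow.c change above makes the cache hash and compare only the address-family-specific prefix of the flow key, measured in machine words. The standalone C sketch below illustrates that idea under stated assumptions: toy_flowi, toy_key_size() and the FNV-style hash are made-up stand-ins for the kernel's flowi, flow_key_size() and jhash2(); it is an illustration of the technique, not kernel code.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef unsigned long flow_compare_t;

/* Hypothetical flow key: a common header plus AF-specific tails. */
struct toy_flowi {
	unsigned long common[2];	/* fields shared by every family */
	unsigned long v4[2];		/* IPv4-only fields */
	unsigned long v6[6];		/* IPv6-only fields */
};

/* Per-family key size in flow_compare_t words, like flow_key_size(). */
static size_t toy_key_size(int family)
{
	switch (family) {
	case 4:
		return (offsetof(struct toy_flowi, v4) +
			sizeof(((struct toy_flowi *)0)->v4)) / sizeof(flow_compare_t);
	case 6:
		return sizeof(struct toy_flowi) / sizeof(flow_compare_t);
	default:
		return 0;	/* unknown family: caller skips the cache */
	}
}

/* Hash only the first keysize words (FNV-1a stand-in for jhash2). */
static uint64_t toy_hash(const struct toy_flowi *key, size_t keysize)
{
	const flow_compare_t *k = (const flow_compare_t *)key;
	uint64_t h = 1469598103934665603ull;
	size_t i;

	for (i = 0; i < keysize; i++) {
		h ^= k[i];
		h *= 1099511628211ull;
	}
	return h;
}

/* Compare only the first keysize words, as flow_key_compare() now does. */
static int toy_key_compare(const struct toy_flowi *a,
			   const struct toy_flowi *b, size_t keysize)
{
	return memcmp(a, b, keysize * sizeof(flow_compare_t));
}

int main(void)
{
	struct toy_flowi a = { .common = {1, 2}, .v4 = {3, 4}, .v6 = {9} };
	struct toy_flowi b = a;
	size_t ks = toy_key_size(4);

	b.v6[0] = 42;	/* differs only in IPv6-specific words */

	/* An IPv4 lookup ignores the v6 tail: both keys hash and compare equal. */
	printf("v4 keysize=%zu hash match=%d key match=%d\n", ks,
	       toy_hash(&a, ks) == toy_hash(&b, ks),
	       toy_key_compare(&a, &b, ks) == 0);
	return 0;
}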
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 27002dffe7ed..387703f56fce 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -611,8 +611,21 @@ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
 }
 EXPORT_SYMBOL_GPL(skb_morph);
 
-/* skb frags copy userspace buffers to kernel */
-static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
+/* skb_copy_ubufs - copy userspace skb frags buffers to kernel
+ * @skb: the skb to modify
+ * @gfp_mask: allocation priority
+ *
+ * This must be called on SKBTX_DEV_ZEROCOPY skb.
+ * It will copy all frags into kernel and drop the reference
+ * to userspace pages.
+ *
+ * If this function is called from an interrupt gfp_mask() must be
+ * %GFP_ATOMIC.
+ *
+ * Returns 0 on success or a negative error code on failure
+ * to allocate kernel memory to copy to.
+ */
+int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 {
 	int i;
 	int num_frags = skb_shinfo(skb)->nr_frags;
@@ -652,6 +665,8 @@ static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 		skb_shinfo(skb)->frags[i - 1].page = head;
 		head = (struct page *)head->private;
 	}
+
+	skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
 	return 0;
 }
 
@@ -677,7 +692,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
 		if (skb_copy_ubufs(skb, gfp_mask))
 			return NULL;
-		skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
 	}
 
 	n = skb + 1;
@@ -803,7 +817,6 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
 			n = NULL;
 			goto out;
 		}
-		skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
 	}
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
@@ -896,7 +909,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
 		if (skb_copy_ubufs(skb, gfp_mask))
 			goto nofrags;
-		skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
 	}
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
 		get_page(skb_shinfo(skb)->frags[i].page);
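
Aside: the skbuff.c hunks above move the clearing of SKBTX_DEV_ZEROCOPY into skb_copy_ubufs() itself, so skb_clone(), pskb_copy(), pskb_expand_head() and the new dev_forward_skb() caller only check the return value. A minimal userspace sketch of that pattern, with made-up toy_buf/toy_copy_ubufs names standing in for the skb machinery:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TX_ZEROCOPY 0x1u	/* hypothetical stand-in for SKBTX_DEV_ZEROCOPY */

struct toy_buf {
	unsigned int tx_flags;
	const char *user_data;	/* "borrowed" user memory */
	char *kernel_copy;	/* private copy owned by the buffer */
};

/*
 * Like the reworked skb_copy_ubufs(): copy the borrowed data and clear
 * the zero-copy flag here, so callers only have to check the result.
 */
static int toy_copy_ubufs(struct toy_buf *b)
{
	size_t len = strlen(b->user_data) + 1;
	char *copy = malloc(len);

	if (!copy)
		return -1;
	memcpy(copy, b->user_data, len);
	b->kernel_copy = copy;
	b->user_data = NULL;
	b->tx_flags &= ~TX_ZEROCOPY;	/* cleared centrally, not by every caller */
	return 0;
}

/* Caller shape matching the new dev_forward_skb()/skb_clone() pattern. */
static int toy_forward(struct toy_buf *b)
{
	if (b->tx_flags & TX_ZEROCOPY) {
		if (toy_copy_ubufs(b))
			return -1;	/* drop */
	}
	return 0;
}

int main(void)
{
	struct toy_buf b = { .tx_flags = TX_ZEROCOPY, .user_data = "payload" };

	if (toy_forward(&b) == 0)
		printf("flags=%#x copy=%s\n", b.tx_flags, b.kernel_copy);
	free(b.kernel_copy);
	return 0;
}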
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 27997d35ebd3..a2468363978e 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -340,7 +340,7 @@ void ether_setup(struct net_device *dev)
 	dev->addr_len		= ETH_ALEN;
 	dev->tx_queue_len	= 1000;	/* Ethernet wants good queues */
 	dev->flags		= IFF_BROADCAST|IFF_MULTICAST;
-	dev->priv_flags		= IFF_TX_SKB_SHARING;
+	dev->priv_flags		|= IFF_TX_SKB_SHARING;
 
 	memset(dev->broadcast, 0xFF, ETH_ALEN);
 
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 1b745d412cf6..dd2b9478ddd1 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -466,8 +466,13 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 		goto out;
 
 	if (addr->sin_family != AF_INET) {
+		/* Compatibility games : accept AF_UNSPEC (mapped to AF_INET)
+		 * only if s_addr is INADDR_ANY.
+		 */
 		err = -EAFNOSUPPORT;
-		goto out;
+		if (addr->sin_family != AF_UNSPEC ||
+		    addr->sin_addr.s_addr != htonl(INADDR_ANY))
+			goto out;
 	}
 
 	chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 33e2c35b74b7..80106d89d548 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -142,6 +142,14 @@ const struct fib_prop fib_props[RTN_MAX + 1] = {
 };
 
 /* Release a nexthop info record */
+static void free_fib_info_rcu(struct rcu_head *head)
+{
+	struct fib_info *fi = container_of(head, struct fib_info, rcu);
+
+	if (fi->fib_metrics != (u32 *) dst_default_metrics)
+		kfree(fi->fib_metrics);
+	kfree(fi);
+}
 
 void free_fib_info(struct fib_info *fi)
 {
@@ -156,7 +164,7 @@ void free_fib_info(struct fib_info *fi)
 	} endfor_nexthops(fi);
 	fib_info_cnt--;
 	release_net(fi->fib_net);
-	kfree_rcu(fi, rcu);
+	call_rcu(&fi->rcu, free_fib_info_rcu);
 }
 
 void fib_release_info(struct fib_info *fi)
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index 5c9b9d963918..e59aabd0eae4 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -218,6 +218,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
 	return skb;
 
 nlmsg_failure:
+	kfree_skb(skb);
 	*errp = -EINVAL;
 	printk(KERN_ERR "ip_queue: error creating packet message\n");
 	return NULL;
@@ -313,7 +314,7 @@ ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
 {
 	struct nf_queue_entry *entry;
 
-	if (vmsg->value > NF_MAX_VERDICT)
+	if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN)
 		return -EINVAL;
 
 	entry = ipq_find_dequeue_entry(vmsg->id);
@@ -358,12 +359,9 @@ ipq_receive_peer(struct ipq_peer_msg *pmsg,
 		break;
 
 	case IPQM_VERDICT:
-		if (pmsg->msg.verdict.value > NF_MAX_VERDICT)
-			status = -EINVAL;
-		else
-			status = ipq_set_verdict(&pmsg->msg.verdict,
-						 len - sizeof(*pmsg));
-		break;
+		status = ipq_set_verdict(&pmsg->msg.verdict,
+					 len - sizeof(*pmsg));
+		break;
 	default:
 		status = -EINVAL;
 	}
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index b14ec7d03b6e..4bfad5da94f4 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -254,6 +254,8 @@ static const struct snmp_mib snmp4_net_list[] = {
 	SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP),
 	SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER),
 	SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW),
+	SNMP_MIB_ITEM("TCPReqQFullDoCookies", LINUX_MIB_TCPREQQFULLDOCOOKIES),
+	SNMP_MIB_ITEM("TCPReqQFullDrop", LINUX_MIB_TCPREQQFULLDROP),
 	SNMP_MIB_SENTINEL
 };
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 1c12b8ec849d..c34f01513945 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -808,20 +808,38 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
 	kfree(inet_rsk(req)->opt);
 }
 
-static void syn_flood_warning(const struct sk_buff *skb)
+/*
+ * Return 1 if a syncookie should be sent
+ */
+int tcp_syn_flood_action(struct sock *sk,
+			 const struct sk_buff *skb,
+			 const char *proto)
 {
-	const char *msg;
+	const char *msg = "Dropping request";
+	int want_cookie = 0;
+	struct listen_sock *lopt;
+
+
 
 #ifdef CONFIG_SYN_COOKIES
-	if (sysctl_tcp_syncookies)
+	if (sysctl_tcp_syncookies) {
 		msg = "Sending cookies";
-	else
+		want_cookie = 1;
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
+	} else
 #endif
-		msg = "Dropping request";
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
 
-	pr_info("TCP: Possible SYN flooding on port %d. %s.\n",
-		ntohs(tcp_hdr(skb)->dest), msg);
+	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
+	if (!lopt->synflood_warned) {
+		lopt->synflood_warned = 1;
+		pr_info("%s: Possible SYN flooding on port %d. %s. "
+			" Check SNMP counters.\n",
+			proto, ntohs(tcp_hdr(skb)->dest), msg);
+	}
+	return want_cookie;
 }
+EXPORT_SYMBOL(tcp_syn_flood_action);
 
 /*
  * Save and compile IPv4 options into the request_sock if needed.
@@ -1235,11 +1253,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	__be32 saddr = ip_hdr(skb)->saddr;
 	__be32 daddr = ip_hdr(skb)->daddr;
 	__u32 isn = TCP_SKB_CB(skb)->when;
-#ifdef CONFIG_SYN_COOKIES
 	int want_cookie = 0;
-#else
-#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
-#endif
 
 	/* Never answer to SYNs send to broadcast or multicast */
 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -1250,14 +1264,9 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	 * evidently real one.
 	 */
 	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
-		if (net_ratelimit())
-			syn_flood_warning(skb);
-#ifdef CONFIG_SYN_COOKIES
-		if (sysctl_tcp_syncookies) {
-			want_cookie = 1;
-		} else
-#endif
-			goto drop;
+		want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
+		if (!want_cookie)
+			goto drop;
 	}
 
 	/* Accept backlog is full. If we have already queued enough
@@ -1303,9 +1312,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 		while (l-- > 0)
 			*c++ ^= *hash_location++;
 
-#ifdef CONFIG_SYN_COOKIES
 		want_cookie = 0;	/* not our kind of cookie */
-#endif
 		tmp_ext.cookie_out_never = 0; /* false */
 		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
 	} else if (!tp->rx_opt.cookie_in_always) {
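
Aside: tcp_syn_flood_action() above combines three things when the listen queue overflows: SNMP counters for both outcomes, a warn-once message per listener, and the decision whether to fall back to syncookies. The rough standalone C sketch below shows that shape under stated assumptions: toy_listener, the counters and toy_syn_flood_action() are hypothetical stand-ins for the kernel's listen_sock, MIB entries and the real function.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the sysctl and SNMP counters. */
static int sysctl_tcp_syncookies = 1;
static unsigned long mib_reqq_full_docookies;
static unsigned long mib_reqq_full_drop;

struct toy_listener {
	int port;
	bool synflood_warned;	/* warn-once flag, like lopt->synflood_warned */
};

/* Return true if a syncookie should be sent for this overloaded listener. */
static bool toy_syn_flood_action(struct toy_listener *l, const char *proto)
{
	const char *msg = "Dropping request";
	bool want_cookie = false;

	if (sysctl_tcp_syncookies) {
		msg = "Sending cookies";
		want_cookie = true;
		mib_reqq_full_docookies++;
	} else {
		mib_reqq_full_drop++;
	}

	if (!l->synflood_warned) {
		l->synflood_warned = true;	/* print at most once per listener */
		printf("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
		       proto, l->port, msg);
	}
	return want_cookie;
}

int main(void)
{
	struct toy_listener l = { .port = 80 };
	int i;

	for (i = 0; i < 3; i++)		/* only the first call prints */
		toy_syn_flood_action(&l, "TCP");
	printf("cookies=%lu drops=%lu\n",
	       mib_reqq_full_docookies, mib_reqq_full_drop);
	return 0;
}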
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 9ef1831746ef..b46e9f88ce37 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -599,7 +599,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
 	return 0;
 }
 
-int datagram_send_ctl(struct net *net,
+int datagram_send_ctl(struct net *net, struct sock *sk,
 		      struct msghdr *msg, struct flowi6 *fl6,
 		      struct ipv6_txoptions *opt,
 		      int *hlimit, int *tclass, int *dontfrag)
@@ -658,7 +658,8 @@ int datagram_send_ctl(struct net *net,
 
 			if (addr_type != IPV6_ADDR_ANY) {
 				int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL;
-				if (!ipv6_chk_addr(net, &src_info->ipi6_addr,
+				if (!inet_sk(sk)->transparent &&
+				    !ipv6_chk_addr(net, &src_info->ipi6_addr,
 						   strict ? dev : NULL, 0))
 					err = -EINVAL;
 				else
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index f3caf1b8d572..543039450193 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -322,8 +322,8 @@ static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned lo
 }
 
 static struct ip6_flowlabel *
-fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
-	  int optlen, int *err_p)
+fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
+	  char __user *optval, int optlen, int *err_p)
 {
 	struct ip6_flowlabel *fl = NULL;
 	int olen;
@@ -360,7 +360,7 @@ fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
 		msg.msg_control = (void*)(fl->opt+1);
 		memset(&flowi6, 0, sizeof(flowi6));
 
-		err = datagram_send_ctl(net, &msg, &flowi6, fl->opt, &junk,
+		err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk,
 					&junk, &junk);
 		if (err)
 			goto done;
@@ -528,7 +528,7 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
 		if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
 			return -EINVAL;
 
-		fl = fl_create(net, &freq, optval, optlen, &err);
+		fl = fl_create(net, sk, &freq, optval, optlen, &err);
 		if (fl == NULL)
 			return err;
 		sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 147ede38ab48..2fbda5fc4cc4 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -475,7 +475,7 @@ sticky_done:
 		msg.msg_controllen = optlen;
 		msg.msg_control = (void*)(opt+1);
 
-		retv = datagram_send_ctl(net, &msg, &fl6, opt, &junk, &junk,
+		retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk,
 					 &junk);
 		if (retv)
 			goto done;
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 249394863284..e63c3972a739 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -218,6 +218,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
 	return skb;
 
 nlmsg_failure:
+	kfree_skb(skb);
 	*errp = -EINVAL;
 	printk(KERN_ERR "ip6_queue: error creating packet message\n");
 	return NULL;
@@ -313,7 +314,7 @@ ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
 {
 	struct nf_queue_entry *entry;
 
-	if (vmsg->value > NF_MAX_VERDICT)
+	if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN)
 		return -EINVAL;
 
 	entry = ipq_find_dequeue_entry(vmsg->id);
@@ -358,12 +359,9 @@ ipq_receive_peer(struct ipq_peer_msg *pmsg,
 		break;
 
 	case IPQM_VERDICT:
-		if (pmsg->msg.verdict.value > NF_MAX_VERDICT)
-			status = -EINVAL;
-		else
-			status = ipq_set_verdict(&pmsg->msg.verdict,
-						 len - sizeof(*pmsg));
-		break;
+		status = ipq_set_verdict(&pmsg->msg.verdict,
+					 len - sizeof(*pmsg));
+		break;
 	default:
 		status = -EINVAL;
 	}
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 6a79f3081bdb..343852e5c703 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -817,8 +817,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
 		memset(opt, 0, sizeof(struct ipv6_txoptions));
 		opt->tot_len = sizeof(struct ipv6_txoptions);
 
-		err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit,
-					&tclass, &dontfrag);
+		err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+					&hlimit, &tclass, &dontfrag);
 		if (err < 0) {
 			fl6_sock_release(flowlabel);
 			return err;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 9e69eb0ec6dd..1250f9020670 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -104,6 +104,9 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
 	struct inet_peer *peer;
 	u32 *p = NULL;
 
+	if (!(rt->dst.flags & DST_HOST))
+		return NULL;
+
 	if (!rt->rt6i_peer)
 		rt6_bind_peer(rt, 1);
 
@@ -252,6 +255,9 @@ static void ip6_dst_destroy(struct dst_entry *dst)
 	struct inet6_dev *idev = rt->rt6i_idev;
 	struct inet_peer *peer = rt->rt6i_peer;
 
+	if (!(rt->dst.flags & DST_HOST))
+		dst_destroy_metrics_generic(dst);
+
 	if (idev != NULL) {
 		rt->rt6i_idev = NULL;
 		in6_dev_put(idev);
@@ -723,9 +729,7 @@ static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort,
 		ipv6_addr_copy(&rt->rt6i_gateway, daddr);
 	}
 
-	rt->rt6i_dst.plen = 128;
 	rt->rt6i_flags |= RTF_CACHE;
-	rt->dst.flags |= DST_HOST;
 
 #ifdef CONFIG_IPV6_SUBTREES
 	if (rt->rt6i_src.plen && saddr) {
@@ -775,9 +779,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
 	struct rt6_info *rt = ip6_rt_copy(ort, daddr);
 
 	if (rt) {
-		rt->rt6i_dst.plen = 128;
 		rt->rt6i_flags |= RTF_CACHE;
-		rt->dst.flags |= DST_HOST;
 		dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_raw(&ort->dst)));
 	}
 	return rt;
@@ -1078,12 +1080,15 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
 		neigh = NULL;
 	}
 
-	rt->rt6i_idev = idev;
+	rt->dst.flags |= DST_HOST;
+	rt->dst.output  = ip6_output;
 	dst_set_neighbour(&rt->dst, neigh);
 	atomic_set(&rt->dst.__refcnt, 1);
-	ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
 	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
-	rt->dst.output  = ip6_output;
+
+	ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
+	rt->rt6i_dst.plen = 128;
+	rt->rt6i_idev = idev;
 
 	spin_lock_bh(&icmp6_dst_lock);
 	rt->dst.next = icmp6_dst_gc_list;
@@ -1261,6 +1266,14 @@ int ip6_route_add(struct fib6_config *cfg)
 	if (rt->rt6i_dst.plen == 128)
 	       rt->dst.flags |= DST_HOST;
 
+	if (!(rt->dst.flags & DST_HOST) && cfg->fc_mx) {
+		u32 *metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
+		if (!metrics) {
+			err = -ENOMEM;
+			goto out;
+		}
+		dst_init_metrics(&rt->dst, metrics, 0);
+	}
 #ifdef CONFIG_IPV6_SUBTREES
 	ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
 	rt->rt6i_src.plen = cfg->fc_src_len;
@@ -1607,9 +1620,6 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
 	if (on_link)
 		nrt->rt6i_flags &= ~RTF_GATEWAY;
 
-	nrt->rt6i_dst.plen = 128;
-	nrt->dst.flags |= DST_HOST;
-
 	ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);
 	dst_set_neighbour(&nrt->dst, neigh_clone(neigh));
 
@@ -1754,9 +1764,10 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
 	if (rt) {
 		rt->dst.input = ort->dst.input;
 		rt->dst.output = ort->dst.output;
+		rt->dst.flags |= DST_HOST;
 
 		ipv6_addr_copy(&rt->rt6i_dst.addr, dest);
-		rt->rt6i_dst.plen = ort->rt6i_dst.plen;
+		rt->rt6i_dst.plen = 128;
 		dst_copy_metrics(&rt->dst, &ort->dst);
 		rt->dst.error = ort->dst.error;
 		rt->rt6i_idev = ort->rt6i_idev;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index d1fb63f4aeb7..3c9fa618b69d 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -531,20 +531,6 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
 	return tcp_v6_send_synack(sk, req, rvp);
 }
 
-static inline void syn_flood_warning(struct sk_buff *skb)
-{
-#ifdef CONFIG_SYN_COOKIES
-	if (sysctl_tcp_syncookies)
-		printk(KERN_INFO
-		       "TCPv6: Possible SYN flooding on port %d. "
-		       "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
-	else
-#endif
-		printk(KERN_INFO
-		       "TCPv6: Possible SYN flooding on port %d. "
-		       "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
-}
-
 static void tcp_v6_reqsk_destructor(struct request_sock *req)
 {
 	kfree_skb(inet6_rsk(req)->pktopts);
@@ -1179,11 +1165,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	struct tcp_sock *tp = tcp_sk(sk);
 	__u32 isn = TCP_SKB_CB(skb)->when;
 	struct dst_entry *dst = NULL;
-#ifdef CONFIG_SYN_COOKIES
 	int want_cookie = 0;
-#else
-#define want_cookie 0
-#endif
 
 	if (skb->protocol == htons(ETH_P_IP))
 		return tcp_v4_conn_request(sk, skb);
@@ -1192,14 +1174,9 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 		goto drop;
 
 	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
-		if (net_ratelimit())
-			syn_flood_warning(skb);
-#ifdef CONFIG_SYN_COOKIES
-		if (sysctl_tcp_syncookies)
-			want_cookie = 1;
-		else
-#endif
-			goto drop;
+		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
+		if (!want_cookie)
+			goto drop;
 	}
 
 	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
@@ -1249,9 +1226,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 		while (l-- > 0)
 			*c++ ^= *hash_location++;
 
-#ifdef CONFIG_SYN_COOKIES
 		want_cookie = 0;	/* not our kind of cookie */
-#endif
 		tmp_ext.cookie_out_never = 0; /* false */
 		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
 	} else if (!tp->rx_opt.cookie_in_always) {
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 29213b51c499..bb95e8e1c6f9 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1090,8 +1090,8 @@ do_udp_sendmsg:
 		memset(opt, 0, sizeof(struct ipv6_txoptions));
 		opt->tot_len = sizeof(*opt);
 
-		err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit,
-					&tclass, &dontfrag);
+		err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+					&hlimit, &tclass, &dontfrag);
 		if (err < 0) {
 			fl6_sock_release(flowlabel);
 			return err;
diff --git a/net/irda/irsysctl.c b/net/irda/irsysctl.c
index d0b70dadf73b..2615ffc8e785 100644
--- a/net/irda/irsysctl.c
+++ b/net/irda/irsysctl.c
@@ -40,9 +40,9 @@ extern int sysctl_slot_timeout;
 extern int sysctl_fast_poll_increase;
 extern char sysctl_devname[];
 extern int sysctl_max_baud_rate;
-extern int sysctl_min_tx_turn_time;
-extern int sysctl_max_tx_data_size;
-extern int sysctl_max_tx_window;
+extern unsigned int sysctl_min_tx_turn_time;
+extern unsigned int sysctl_max_tx_data_size;
+extern unsigned int sysctl_max_tx_window;
 extern int sysctl_max_noreply_time;
 extern int sysctl_warn_noreply_time;
 extern int sysctl_lap_keepalive_time;
diff --git a/net/irda/qos.c b/net/irda/qos.c
index 1b51bcf42394..4369f7f41bcb 100644
--- a/net/irda/qos.c
+++ b/net/irda/qos.c
@@ -60,7 +60,7 @@ int sysctl_max_noreply_time = 12;
  * Default is 10us which means using the unmodified value given by the
  * peer except if it's 0 (0 is likely a bug in the other stack).
  */
-unsigned sysctl_min_tx_turn_time = 10;
+unsigned int sysctl_min_tx_turn_time = 10;
 /*
  * Maximum data size to be used in transmission in payload of LAP frame.
  * There is a bit of confusion in the IrDA spec :
@@ -75,13 +75,13 @@ unsigned sysctl_min_tx_turn_time = 10;
  * bytes frames or all negotiated frame sizes, but you can use the sysctl
  * to play with this value anyway.
  * Jean II */
-unsigned sysctl_max_tx_data_size = 2042;
+unsigned int sysctl_max_tx_data_size = 2042;
 /*
  * Maximum transmit window, i.e. number of LAP frames between turn-around.
  * This allow to override what the peer told us. Some peers are buggy and
  * don't always support what they tell us.
  * Jean II */
-unsigned sysctl_max_tx_window = 7;
+unsigned int sysctl_max_tx_window = 7;
 
 static int irlap_param_baud_rate(void *instance, irda_param_t *param, int get);
 static int irlap_param_link_disconnect(void *instance, irda_param_t *parm,
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 3db78b696c5c..21070e9bc8d0 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -665,7 +665,7 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
 		BUG_ON(!sdata->bss);
 
 		atomic_dec(&sdata->bss->num_sta_ps);
-		__sta_info_clear_tim_bit(sdata->bss, sta);
+		sta_info_clear_tim_bit(sta);
 	}
 
 	local->num_sta--;
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index 2fd4565144de..31d56b23b9e9 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -364,6 +364,7 @@ pptp_inbound_pkt(struct sk_buff *skb,
 		break;
 
 	case PPTP_WAN_ERROR_NOTIFY:
+	case PPTP_SET_LINK_INFO:
 	case PPTP_ECHO_REQUEST:
 	case PPTP_ECHO_REPLY:
 		/* I don't have to explain these ;) */
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 37bf94394be0..8235b86b4e87 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -409,7 +409,7 @@ static void tcp_options(const struct sk_buff *skb,
 		if (opsize < 2) /* "silly options" */
 			return;
 		if (opsize > length)
-			break;	/* don't parse partial options */
+			return;	/* don't parse partial options */
 
 		if (opcode == TCPOPT_SACK_PERM
 		    && opsize == TCPOLEN_SACK_PERM)
@@ -447,7 +447,7 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
 	BUG_ON(ptr == NULL);
 
 	/* Fast path for timestamp-only option */
-	if (length == TCPOLEN_TSTAMP_ALIGNED*4
+	if (length == TCPOLEN_TSTAMP_ALIGNED
 	    && *(__be32 *)ptr == htonl((TCPOPT_NOP << 24)
 				       | (TCPOPT_NOP << 16)
 				       | (TCPOPT_TIMESTAMP << 8)
@@ -469,7 +469,7 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
 		if (opsize < 2) /* "silly options" */
 			return;
 		if (opsize > length)
-			break;	/* don't parse partial options */
+			return;	/* don't parse partial options */
 
 		if (opcode == TCPOPT_SACK
 		    && opsize >= (TCPOLEN_SACK_BASE
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 00bd475eab4b..a80b0cb03f17 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -646,8 +646,8 @@ verdicthdr_get(const struct nlattr * const nfqa[])
 		return NULL;
 
 	vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
-	verdict = ntohl(vhdr->verdict);
-	if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT)
+	verdict = ntohl(vhdr->verdict) & NF_VERDICT_MASK;
+	if (verdict > NF_MAX_VERDICT || verdict == NF_STOLEN)
 		return NULL;
 	return vhdr;
 }
diff --git a/net/netfilter/xt_rateest.c b/net/netfilter/xt_rateest.c
index 76a083184d8e..ed0db15ab00e 100644
--- a/net/netfilter/xt_rateest.c
+++ b/net/netfilter/xt_rateest.c
@@ -78,7 +78,7 @@ static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
 {
 	struct xt_rateest_match_info *info = par->matchinfo;
 	struct xt_rateest *est1, *est2;
-	int ret = false;
+	int ret = -EINVAL;
 
 	if (hweight32(info->flags & (XT_RATEEST_MATCH_ABS |
 				     XT_RATEEST_MATCH_REL)) != 1)
@@ -101,13 +101,12 @@ static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
 	if (!est1)
 		goto err1;
 
+	est2 = NULL;
 	if (info->flags & XT_RATEEST_MATCH_REL) {
 		est2 = xt_rateest_lookup(info->name2);
 		if (!est2)
 			goto err2;
-	} else
-		est2 = NULL;
-
+	}
 
 	info->est1 = est1;
 	info->est2 = est2;
@@ -116,7 +115,7 @@ static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
 err2:
 	xt_rateest_put(est1);
 err1:
-	return -EINVAL;
+	return ret;
 }
 
 static void xt_rateest_mt_destroy(const struct xt_mtdtor_param *par)
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index be4505ee67a9..b01427924f81 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -425,7 +425,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
 	struct rsvp_filter *f, **fp;
 	struct rsvp_session *s, **sp;
 	struct tc_rsvp_pinfo *pinfo = NULL;
-	struct nlattr *opt = tca[TCA_OPTIONS-1];
+	struct nlattr *opt = tca[TCA_OPTIONS];
 	struct nlattr *tb[TCA_RSVP_MAX + 1];
 	struct tcf_exts e;
 	unsigned int h1, h2;
@@ -439,7 +439,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
 	if (err < 0)
 		return err;
 
-	err = tcf_exts_validate(tp, tb, tca[TCA_RATE-1], &e, &rsvp_ext_map);
+	err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &rsvp_ext_map);
 	if (err < 0)
 		return err;
 
@@ -449,8 +449,8 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
 
 		if (f->handle != handle && handle)
 			goto errout2;
-		if (tb[TCA_RSVP_CLASSID-1]) {
-			f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID-1]);
+		if (tb[TCA_RSVP_CLASSID]) {
+			f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
 			tcf_bind_filter(tp, &f->res, base);
 		}
 
@@ -462,7 +462,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
 	err = -EINVAL;
 	if (handle)
 		goto errout2;
-	if (tb[TCA_RSVP_DST-1] == NULL)
+	if (tb[TCA_RSVP_DST] == NULL)
 		goto errout2;
 
 	err = -ENOBUFS;
@@ -471,19 +471,19 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
 		goto errout2;
 
 	h2 = 16;
-	if (tb[TCA_RSVP_SRC-1]) {
-		memcpy(f->src, nla_data(tb[TCA_RSVP_SRC-1]), sizeof(f->src));
+	if (tb[TCA_RSVP_SRC]) {
+		memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src));
 		h2 = hash_src(f->src);
 	}
-	if (tb[TCA_RSVP_PINFO-1]) {
-		pinfo = nla_data(tb[TCA_RSVP_PINFO-1]);
+	if (tb[TCA_RSVP_PINFO]) {
+		pinfo = nla_data(tb[TCA_RSVP_PINFO]);
 		f->spi = pinfo->spi;
 		f->tunnelhdr = pinfo->tunnelhdr;
 	}
-	if (tb[TCA_RSVP_CLASSID-1])
-		f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID-1]);
+	if (tb[TCA_RSVP_CLASSID])
+		f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
 
-	dst = nla_data(tb[TCA_RSVP_DST-1]);
+	dst = nla_data(tb[TCA_RSVP_DST]);
 	h1 = hash_dst(dst, pinfo ? pinfo->protocol : 0, pinfo ? pinfo->tunnelid : 0);
 
 	err = -ENOMEM;
@@ -642,8 +642,7 @@ nla_put_failure:
 	return -1;
 }
 
-static struct tcf_proto_ops RSVP_OPS = {
-	.next		=	NULL,
+static struct tcf_proto_ops RSVP_OPS __read_mostly = {
 	.kind		=	RSVP_ID,
 	.classify	=	rsvp_classify,
 	.init		=	rsvp_init,
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 167c880cf8da..76388b083f28 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1689,6 +1689,11 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 	case SCTP_CMD_PURGE_ASCONF_QUEUE:
 		sctp_asconf_queue_teardown(asoc);
 		break;
+
+	case SCTP_CMD_SET_ASOC:
+		asoc = cmd->obj.asoc;
+		break;
+
 	default:
 		pr_warn("Impossible command: %u, %p\n",
 			cmd->verb, cmd->obj.ptr);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 49b847b00f99..a0f31e6c1c63 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -2047,6 +2047,12 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
 	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
 
+	/* Restore association pointer to provide SCTP command interpeter
+	 * with a valid context in case it needs to manipulate
+	 * the queues */
+	sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC,
+			 SCTP_ASOC((struct sctp_association *)asoc));
+
 	return retval;
 
 nomem: