path: root/net
author	Ingo Molnar <mingo@elte.hu>	2009-03-05 05:45:22 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-03-05 05:45:22 -0500
commit	a140feab42d1cfd811930ab76104559c19dfc4b0 (patch)
tree	41fd871990e888dd5616a6bf1891a1ff307221df /net
parent	1075414b06109a99b0e87601e84c74a95bd45681 (diff)
parent	fec6c6fec3e20637bee5d276fb61dd8b49a3f9cc (diff)
Merge commit 'v2.6.29-rc7' into core/locking
Diffstat (limited to 'net')
-rw-r--r--	net/8021q/vlan_core.c	10
-rw-r--r--	net/9p/protocol.c	22
-rw-r--r--	net/bridge/br_forward.c	7
-rw-r--r--	net/core/dev.c	10
-rw-r--r--	net/core/neighbour.c	14
-rw-r--r--	net/core/net_namespace.c	86
-rw-r--r--	net/core/skbuff.c	8
-rw-r--r--	net/core/sock.c	3
-rw-r--r--	net/ipv4/cipso_ipv4.c	9
-rw-r--r--	net/ipv4/tcp_input.c	9
-rw-r--r--	net/ipv4/tcp_output.c	1
-rw-r--r--	net/ipv4/tcp_scalable.c	2
-rw-r--r--	net/ipv4/udp.c	6
-rw-r--r--	net/ipv6/inet6_hashtables.c	4
-rw-r--r--	net/ipv6/ip6_flowlabel.c	8
-rw-r--r--	net/ipv6/ip6_tunnel.c	2
-rw-r--r--	net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c	30
-rw-r--r--	net/mac80211/tx.c	2
-rw-r--r--	net/netfilter/nf_conntrack_netlink.c	15
-rw-r--r--	net/netfilter/nfnetlink_log.c	8
-rw-r--r--	net/netfilter/x_tables.c	199
-rw-r--r--	net/netfilter/xt_recent.c	2
-rw-r--r--	net/netfilter/xt_sctp.c	2
-rw-r--r--	net/phonet/pep-gprs.c	1
-rw-r--r--	net/phonet/pep.c	2
-rw-r--r--	net/rxrpc/af_rxrpc.c	5
-rw-r--r--	net/sched/sch_drr.c	6
-rw-r--r--	net/wimax/id-table.c	9
28 files changed, 332 insertions(+), 150 deletions(-)
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index e9db889d6222..2886d2fb9ab5 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -1,12 +1,16 @@
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 #include <linux/if_vlan.h>
+#include <linux/netpoll.h>
 #include "vlan.h"
 
 /* VLAN rx hw acceleration helper.  This acts like netif_{rx,receive_skb}(). */
 int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
 		      u16 vlan_tci, int polling)
 {
+	if (netpoll_rx(skb))
+		return NET_RX_DROP;
+
 	if (skb_bond_should_drop(skb))
 		goto drop;
 
@@ -100,6 +104,9 @@ int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
 {
 	int err = NET_RX_SUCCESS;
 
+	if (netpoll_receive_skb(skb))
+		return NET_RX_DROP;
+
 	switch (vlan_gro_common(napi, grp, vlan_tci, skb)) {
 	case -1:
 		return netif_receive_skb(skb);
@@ -126,6 +133,9 @@ int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
 	if (!skb)
 		goto out;
 
+	if (netpoll_receive_skb(skb))
+		goto out;
+
 	err = NET_RX_SUCCESS;
 
 	switch (vlan_gro_common(napi, grp, vlan_tci, skb)) {
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index dcd7666824ba..fc70147c771e 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -29,6 +29,7 @@
 #include <linux/errno.h>
 #include <linux/uaccess.h>
 #include <linux/sched.h>
+#include <linux/types.h>
 #include <net/9p/9p.h>
 #include <net/9p/client.h>
 #include "protocol.h"
@@ -160,29 +161,32 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
 			break;
 		case 'w':{
 				int16_t *val = va_arg(ap, int16_t *);
-				if (pdu_read(pdu, val, sizeof(*val))) {
+				__le16 le_val;
+				if (pdu_read(pdu, &le_val, sizeof(le_val))) {
 					errcode = -EFAULT;
 					break;
 				}
-				*val = cpu_to_le16(*val);
+				*val = le16_to_cpu(le_val);
 			}
 			break;
 		case 'd':{
 				int32_t *val = va_arg(ap, int32_t *);
-				if (pdu_read(pdu, val, sizeof(*val))) {
+				__le32 le_val;
+				if (pdu_read(pdu, &le_val, sizeof(le_val))) {
 					errcode = -EFAULT;
 					break;
 				}
-				*val = cpu_to_le32(*val);
+				*val = le32_to_cpu(le_val);
 			}
 			break;
 		case 'q':{
 				int64_t *val = va_arg(ap, int64_t *);
-				if (pdu_read(pdu, val, sizeof(*val))) {
+				__le64 le_val;
+				if (pdu_read(pdu, &le_val, sizeof(le_val))) {
 					errcode = -EFAULT;
 					break;
 				}
-				*val = cpu_to_le64(*val);
+				*val = le64_to_cpu(le_val);
 			}
 			break;
 		case 's':{
@@ -362,19 +366,19 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
 			}
 			break;
 		case 'w':{
-				int16_t val = va_arg(ap, int);
+				__le16 val = cpu_to_le16(va_arg(ap, int));
 				if (pdu_write(pdu, &val, sizeof(val)))
 					errcode = -EFAULT;
 			}
 			break;
 		case 'd':{
-				int32_t val = va_arg(ap, int32_t);
+				__le32 val = cpu_to_le32(va_arg(ap, int32_t));
 				if (pdu_write(pdu, &val, sizeof(val)))
 					errcode = -EFAULT;
 			}
 			break;
 		case 'q':{
-				int64_t val = va_arg(ap, int64_t);
+				__le64 val = cpu_to_le64(va_arg(ap, int64_t));
 				if (pdu_write(pdu, &val, sizeof(val)))
 					errcode = -EFAULT;
 			}
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index bdd9ccea17ce..d2c27c808d3b 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -67,6 +67,11 @@ static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
 {
 	struct net_device *indev;
 
+	if (skb_warn_if_lro(skb)) {
+		kfree_skb(skb);
+		return;
+	}
+
 	indev = skb->dev;
 	skb->dev = to->dev;
 	skb_forward_csum(skb);
@@ -89,7 +94,7 @@ void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
 /* called with rcu_read_lock */
 void br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
 {
-	if (!skb_warn_if_lro(skb) && should_deliver(to, skb)) {
+	if (should_deliver(to, skb)) {
 		__br_forward(to, skb);
 		return;
 	}
diff --git a/net/core/dev.c b/net/core/dev.c
index 5379b0c1190a..72b0d26fd46d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1090,7 +1090,7 @@ int dev_open(struct net_device *dev)
 	/*
 	 *	Enable NET_DMA
 	 */
-	dmaengine_get();
+	net_dmaengine_get();
 
 	/*
 	 *	Initialize multicasting status
@@ -1172,7 +1172,7 @@ int dev_close(struct net_device *dev)
 	/*
 	 *	Shutdown NET_DMA
 	 */
-	dmaengine_put();
+	net_dmaengine_put();
 
 	return 0;
 }
@@ -2488,6 +2488,9 @@ static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 
 int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
+	if (netpoll_receive_skb(skb))
+		return NET_RX_DROP;
+
 	switch (__napi_gro_receive(napi, skb)) {
 	case -1:
 		return netif_receive_skb(skb);
@@ -2558,6 +2561,9 @@ int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
 	if (!skb)
 		goto out;
 
+	if (netpoll_receive_skb(skb))
+		goto out;
+
 	err = NET_RX_SUCCESS;
 
 	switch (__napi_gro_receive(napi, skb)) {
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index f66c58df8953..278a142d1047 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1994,8 +1994,8 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
 		if (!net_eq(neigh_parms_net(p), net))
 			continue;
 
-		if (nidx++ < neigh_skip)
-			continue;
+		if (nidx < neigh_skip)
+			goto next;
 
 		if (neightbl_fill_param_info(skb, tbl, p,
 					     NETLINK_CB(cb->skb).pid,
@@ -2003,6 +2003,8 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
 					     RTM_NEWNEIGHTBL,
 					     NLM_F_MULTI) <= 0)
 			goto out;
+	next:
+		nidx++;
 	}
 
 	neigh_skip = 0;
@@ -2082,12 +2084,10 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
 		if (h > s_h)
 			s_idx = 0;
 		for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next) {
-			int lidx;
 			if (dev_net(n->dev) != net)
 				continue;
-			lidx = idx++;
-			if (lidx < s_idx)
-				continue;
+			if (idx < s_idx)
+				goto next;
 			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
 					    cb->nlh->nlmsg_seq,
 					    RTM_NEWNEIGH,
@@ -2096,6 +2096,8 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
 				rc = -1;
 				goto out;
 			}
+		next:
+			idx++;
 		}
 	}
 	read_unlock_bh(&tbl->lock);
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 55151faaf90c..2adb1a7d361f 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -32,24 +32,14 @@ static __net_init int setup_net(struct net *net)
 {
 	/* Must be called with net_mutex held */
 	struct pernet_operations *ops;
-	int error;
-	struct net_generic *ng;
+	int error = 0;
 
 	atomic_set(&net->count, 1);
+
 #ifdef NETNS_REFCNT_DEBUG
 	atomic_set(&net->use_count, 0);
 #endif
 
-	error = -ENOMEM;
-	ng = kzalloc(sizeof(struct net_generic) +
-		     INITIAL_NET_GEN_PTRS * sizeof(void *), GFP_KERNEL);
-	if (ng == NULL)
-		goto out;
-
-	ng->len = INITIAL_NET_GEN_PTRS;
-	rcu_assign_pointer(net->gen, ng);
-
-	error = 0;
 	list_for_each_entry(ops, &pernet_list, list) {
 		if (ops->init) {
 			error = ops->init(net);
@@ -70,24 +60,50 @@ out_undo:
 	}
 
 	rcu_barrier();
-	kfree(ng);
 	goto out;
 }
 
+static struct net_generic *net_alloc_generic(void)
+{
+	struct net_generic *ng;
+	size_t generic_size = sizeof(struct net_generic) +
+		INITIAL_NET_GEN_PTRS * sizeof(void *);
+
+	ng = kzalloc(generic_size, GFP_KERNEL);
+	if (ng)
+		ng->len = INITIAL_NET_GEN_PTRS;
+
+	return ng;
+}
+
 #ifdef CONFIG_NET_NS
 static struct kmem_cache *net_cachep;
 static struct workqueue_struct *netns_wq;
 
 static struct net *net_alloc(void)
 {
-	return kmem_cache_zalloc(net_cachep, GFP_KERNEL);
+	struct net *net = NULL;
+	struct net_generic *ng;
+
+	ng = net_alloc_generic();
+	if (!ng)
+		goto out;
+
+	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
+	if (!net)
+		goto out_free;
+
+	rcu_assign_pointer(net->gen, ng);
+out:
+	return net;
+
+out_free:
+	kfree(ng);
+	goto out;
 }
 
 static void net_free(struct net *net)
 {
-	if (!net)
-		return;
-
 #ifdef NETNS_REFCNT_DEBUG
 	if (unlikely(atomic_read(&net->use_count) != 0)) {
 		printk(KERN_EMERG "network namespace not free! Usage: %d\n",
@@ -112,27 +128,28 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net)
 	err = -ENOMEM;
 	new_net = net_alloc();
 	if (!new_net)
-		goto out;
+		goto out_err;
 
 	mutex_lock(&net_mutex);
 	err = setup_net(new_net);
-	if (err)
-		goto out_unlock;
-
-	rtnl_lock();
-	list_add_tail(&new_net->list, &net_namespace_list);
-	rtnl_unlock();
-
-
-out_unlock:
+	if (!err) {
+		rtnl_lock();
+		list_add_tail(&new_net->list, &net_namespace_list);
+		rtnl_unlock();
+	}
 	mutex_unlock(&net_mutex);
+
+	if (err)
+		goto out_free;
 out:
 	put_net(old_net);
-	if (err) {
-		net_free(new_net);
-		new_net = ERR_PTR(err);
-	}
 	return new_net;
+
+out_free:
+	net_free(new_net);
+out_err:
+	new_net = ERR_PTR(err);
+	goto out;
 }
 
 static void cleanup_net(struct work_struct *work)
@@ -188,6 +205,7 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net)
 
 static int __init net_ns_init(void)
 {
+	struct net_generic *ng;
 	int err;
 
 	printk(KERN_INFO "net_namespace: %zd bytes\n", sizeof(struct net));
@@ -202,6 +220,12 @@ static int __init net_ns_init(void)
 		panic("Could not create netns workq");
 #endif
 
+	ng = net_alloc_generic();
+	if (!ng)
+		panic("Could not allocate generic netns");
+
+	rcu_assign_pointer(init_net.gen, ng);
+
 	mutex_lock(&net_mutex);
 	err = setup_net(&init_net);
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index da74b844f4ea..c6a6b166f8d6 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -143,14 +143,6 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
 	BUG();
 }
 
-void skb_truesize_bug(struct sk_buff *skb)
-{
-	WARN(net_ratelimit(), KERN_ERR "SKB BUG: Invalid truesize (%u) "
-	     "len=%u, sizeof(sk_buff)=%Zd\n",
-	     skb->truesize, skb->len, sizeof(struct sk_buff));
-}
-EXPORT_SYMBOL(skb_truesize_bug);
-
 /*	Allocate a new skbuff. We do this ourselves so we can fill in a few
  *	'private' fields and also do memory statistics to find all the
  *	[BEEP] leaks.
diff --git a/net/core/sock.c b/net/core/sock.c
index f3a0d08cbb48..5f97caa158e8 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -696,6 +696,8 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 	if (len < 0)
 		return -EINVAL;
 
+	memset(&v, 0, sizeof(v));
+
 	switch(optname) {
 	case SO_DEBUG:
 		v.val = sock_flag(sk, SOCK_DBG);
@@ -1135,7 +1137,6 @@ void sock_rfree(struct sk_buff *skb)
 {
 	struct sock *sk = skb->sk;
 
-	skb_truesize_check(skb);
 	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
 	sk_mem_uncharge(skb->sk, skb->truesize);
 }
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 6bb2635b5ded..7bc992976d29 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -3,11 +3,16 @@
  *
  * This is an implementation of the CIPSO 2.2 protocol as specified in
  * draft-ietf-cipso-ipsecurity-01.txt with additional tag types as found in
- * FIPS-188, copies of both documents can be found in the Documentation
- * directory.  While CIPSO never became a full IETF RFC standard many vendors
+ * FIPS-188.  While CIPSO never became a full IETF RFC standard many vendors
  * have chosen to adopt the protocol and over the years it has become a
  * de-facto standard for labeled networking.
  *
+ * The CIPSO draft specification can be found in the kernel's Documentation
+ * directory as well as the following URL:
+ *   http://netlabel.sourceforge.net/files/draft-ietf-cipso-ipsecurity-01.txt
+ * The FIPS-188 specification can be found at the following URL:
+ *   http://www.itl.nist.gov/fipspubs/fip188.htm
+ *
  * Author: Paul Moore <paul.moore@hp.com>
  *
  */
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a6961d75c7ea..c28976a7e596 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1374,7 +1374,8 @@ static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
 
 static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 			   struct tcp_sacktag_state *state,
-			   unsigned int pcount, int shifted, int mss)
+			   unsigned int pcount, int shifted, int mss,
+			   int dup_sack)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
@@ -1410,7 +1411,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 	}
 
 	/* We discard results */
-	tcp_sacktag_one(skb, sk, state, 0, pcount);
+	tcp_sacktag_one(skb, sk, state, dup_sack, pcount);
 
 	/* Difference in this won't matter, both ACKed by the same cumul. ACK */
 	TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
@@ -1561,7 +1562,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 
 	if (!skb_shift(prev, skb, len))
 		goto fallback;
-	if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss))
+	if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
 		goto out;
 
 	/* Hole filled allows collapsing with the next as well, this is very
@@ -1580,7 +1581,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 		len = skb->len;
 		if (skb_shift(prev, skb, len)) {
 			pcount += tcp_skb_pcount(skb);
-			tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss);
+			tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0);
 		}
 
 out:
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index dda42f0bd7a3..da2c3b8794f2 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2023,7 +2023,6 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 		last_lost = tp->snd_una;
 	}
 
-	/* First pass: retransmit lost packets. */
 	tcp_for_write_queue_from(skb, sk) {
 		__u8 sacked = TCP_SKB_CB(skb)->sacked;
 
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 2747ec7bfb63..4660b088a8ce 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -1,6 +1,6 @@
 /* Tom Kelly's Scalable TCP
  *
- * See htt://www-lce.eng.cam.ac.uk/~ctk21/scalable/
+ * See http://www.deneholme.net/tom/scalable/
  *
  * John Heffner <jheffner@sc.edu>
  */
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index cc3a0a06c004..c47c989cb1fb 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1234,8 +1234,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 	struct udphdr *uh;
 	unsigned short ulen;
 	struct rtable *rt = (struct rtable*)skb->dst;
-	__be32 saddr = ip_hdr(skb)->saddr;
-	__be32 daddr = ip_hdr(skb)->daddr;
+	__be32 saddr, daddr;
 	struct net *net = dev_net(skb->dev);
 
 	/*
@@ -1259,6 +1258,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 	if (udp4_csum_init(skb, uh, proto))
 		goto csum_error;
 
+	saddr = ip_hdr(skb)->saddr;
+	daddr = ip_hdr(skb)->daddr;
+
 	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
 		return __udp4_lib_mcast_deliver(net, skb, uh,
 				saddr, daddr, udptable);
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 8fe267feb81e..1bcc3431859e 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -258,11 +258,11 @@ unique:
 
 	if (twp != NULL) {
 		*twp = tw;
-		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITRECYCLED);
+		NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
 	} else if (tw != NULL) {
 		/* Silly. Should hash-dance instead... */
 		inet_twsk_deschedule(tw, death_row);
-		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITRECYCLED);
+		NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
 
 		inet_twsk_put(tw);
 	}
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index c62dd247774f..7712578bdc66 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -323,17 +323,21 @@ static struct ip6_flowlabel *
 fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
 	  int optlen, int *err_p)
 {
-	struct ip6_flowlabel *fl;
+	struct ip6_flowlabel *fl = NULL;
 	int olen;
 	int addr_type;
 	int err;
 
+	olen = optlen - CMSG_ALIGN(sizeof(*freq));
+	err = -EINVAL;
+	if (olen > 64 * 1024)
+		goto done;
+
 	err = -ENOMEM;
 	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
 	if (fl == NULL)
 		goto done;
 
-	olen = optlen - CMSG_ALIGN(sizeof(*freq));
 	if (olen > 0) {
 		struct msghdr msg;
 		struct flowi flowi;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 58e2b0d93758..d994c55a5b16 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -249,8 +249,8 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p)
 	}
 
 	t = netdev_priv(dev);
-	ip6_tnl_dev_init(dev);
 	t->parms = *p;
+	ip6_tnl_dev_init(dev);
 
 	if ((err = register_netdevice(dev)) < 0)
 		goto failed_free;
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index c455cf4ee756..72dbb6d1a6b3 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -49,8 +49,19 @@ static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
 static const u_int8_t invmap[] = {
 	[ICMPV6_ECHO_REQUEST - 128]	= ICMPV6_ECHO_REPLY + 1,
 	[ICMPV6_ECHO_REPLY - 128]	= ICMPV6_ECHO_REQUEST + 1,
-	[ICMPV6_NI_QUERY - 128]		= ICMPV6_NI_QUERY + 1,
-	[ICMPV6_NI_REPLY - 128]		= ICMPV6_NI_REPLY +1
+	[ICMPV6_NI_QUERY - 128]		= ICMPV6_NI_REPLY + 1,
+	[ICMPV6_NI_REPLY - 128]		= ICMPV6_NI_QUERY +1
+};
+
+static const u_int8_t noct_valid_new[] = {
+	[ICMPV6_MGM_QUERY - 130] = 1,
+	[ICMPV6_MGM_REPORT - 130] = 1,
+	[ICMPV6_MGM_REDUCTION - 130] = 1,
+	[NDISC_ROUTER_SOLICITATION - 130] = 1,
+	[NDISC_ROUTER_ADVERTISEMENT - 130] = 1,
+	[NDISC_NEIGHBOUR_SOLICITATION - 130] = 1,
+	[NDISC_NEIGHBOUR_ADVERTISEMENT - 130] = 1,
+	[ICMPV6_MLD2_REPORT - 130] = 1
 };
 
 static bool icmpv6_invert_tuple(struct nf_conntrack_tuple *tuple,
@@ -178,6 +189,7 @@ icmpv6_error(struct net *net, struct sk_buff *skb, unsigned int dataoff,
 {
 	const struct icmp6hdr *icmp6h;
 	struct icmp6hdr _ih;
+	int type;
 
 	icmp6h = skb_header_pointer(skb, dataoff, sizeof(_ih), &_ih);
 	if (icmp6h == NULL) {
@@ -189,11 +201,21 @@ icmpv6_error(struct net *net, struct sk_buff *skb, unsigned int dataoff,
 
 	if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
 	    nf_ip6_checksum(skb, hooknum, dataoff, IPPROTO_ICMPV6)) {
-		nf_log_packet(PF_INET6, 0, skb, NULL, NULL, NULL,
-			      "nf_ct_icmpv6: ICMPv6 checksum failed\n");
+		if (LOG_INVALID(net, IPPROTO_ICMPV6))
+			nf_log_packet(PF_INET6, 0, skb, NULL, NULL, NULL,
+				      "nf_ct_icmpv6: ICMPv6 checksum failed ");
 		return -NF_ACCEPT;
 	}
 
+	type = icmp6h->icmp6_type - 130;
+	if (type >= 0 && type < sizeof(noct_valid_new) &&
+	    noct_valid_new[type]) {
+		skb->nfct = &nf_conntrack_untracked.ct_general;
+		skb->nfctinfo = IP_CT_NEW;
+		nf_conntrack_get(skb->nfct);
+		return NF_ACCEPT;
+	}
+
 	/* is not error message ? */
 	if (icmp6h->icmp6_type >= 128)
 		return NF_ACCEPT;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 4278e545638f..94de5033f0b6 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1343,6 +1343,8 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			    list) {
 		if (!netif_running(sdata->dev))
 			continue;
+		if (sdata->vif.type != NL80211_IFTYPE_AP)
+			continue;
 		if (compare_ether_addr(sdata->dev->dev_addr,
 				       hdr->addr2)) {
 			dev_hold(sdata->dev);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index c32a7e8e3a1b..cb78aa00399e 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -434,7 +434,7 @@ static int ctnetlink_conntrack_event(struct notifier_block *this,
 	} else
 		return NOTIFY_DONE;
 
-	if (!nfnetlink_has_listeners(group))
+	if (!item->report && !nfnetlink_has_listeners(group))
 		return NOTIFY_DONE;
 
 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
@@ -1215,6 +1215,16 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
 		}
 	}
 
+#ifdef CONFIG_NF_NAT_NEEDED
+	if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) {
+		err = ctnetlink_change_nat_seq_adj(ct, cda);
+		if (err < 0) {
+			rcu_read_unlock();
+			goto err;
+		}
+	}
+#endif
+
 	if (cda[CTA_PROTOINFO]) {
 		err = ctnetlink_change_protoinfo(ct, cda);
 		if (err < 0) {
@@ -1492,7 +1502,8 @@ static int ctnetlink_expect_event(struct notifier_block *this,
 	} else
 		return NOTIFY_DONE;
 
-	if (!nfnetlink_has_listeners(NFNLGRP_CONNTRACK_EXP_NEW))
+	if (!item->report &&
+	    !nfnetlink_has_listeners(NFNLGRP_CONNTRACK_EXP_NEW))
 		return NOTIFY_DONE;
 
 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index fa49dc7fe100..c712e9fc6bba 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -39,7 +39,7 @@
 #endif
 
 #define NFULNL_NLBUFSIZ_DEFAULT	NLMSG_GOODSIZE
-#define NFULNL_TIMEOUT_DEFAULT 	HZ	/* every second */
+#define NFULNL_TIMEOUT_DEFAULT 	100	/* every second */
 #define NFULNL_QTHRESH_DEFAULT 	100	/* 100 packets */
 #define NFULNL_COPY_RANGE_MAX	0xFFFF	/* max packet size is limited by 16-bit struct nfattr nfa_len field */
 
@@ -590,8 +590,10 @@ nfulnl_log_packet(u_int8_t pf,
 
 	qthreshold = inst->qthreshold;
 	/* per-rule qthreshold overrides per-instance */
-	if (qthreshold > li->u.ulog.qthreshold)
-		qthreshold = li->u.ulog.qthreshold;
+	if (li->u.ulog.qthreshold)
+		if (qthreshold > li->u.ulog.qthreshold)
+			qthreshold = li->u.ulog.qthreshold;
+
 
 	switch (inst->copy_mode) {
 	case NFULNL_COPY_META:
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index bfbf521f6ea5..5baccfa5a0de 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -827,59 +827,143 @@ static const struct file_operations xt_table_ops = {
 	.release = seq_release_net,
 };
 
-static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
+/*
+ * Traverse state for ip{,6}_{tables,matches} for helping crossing
+ * the multi-AF mutexes.
+ */
+struct nf_mttg_trav {
+	struct list_head *head, *curr;
+	uint8_t class, nfproto;
+};
+
+enum {
+	MTTG_TRAV_INIT,
+	MTTG_TRAV_NFP_UNSPEC,
+	MTTG_TRAV_NFP_SPEC,
+	MTTG_TRAV_DONE,
+};
+
+static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
+    bool is_target)
 {
-	struct proc_dir_entry *pde = (struct proc_dir_entry *)seq->private;
-	u_int16_t af = (unsigned long)pde->data;
+	static const uint8_t next_class[] = {
+		[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
+		[MTTG_TRAV_NFP_SPEC]   = MTTG_TRAV_DONE,
+	};
+	struct nf_mttg_trav *trav = seq->private;
+
+	switch (trav->class) {
+	case MTTG_TRAV_INIT:
+		trav->class = MTTG_TRAV_NFP_UNSPEC;
+		mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
+		trav->head = trav->curr = is_target ?
+			&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
+		break;
+	case MTTG_TRAV_NFP_UNSPEC:
+		trav->curr = trav->curr->next;
+		if (trav->curr != trav->head)
+			break;
+		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
+		mutex_lock(&xt[trav->nfproto].mutex);
+		trav->head = trav->curr = is_target ?
+			&xt[trav->nfproto].target : &xt[trav->nfproto].match;
+		trav->class = next_class[trav->class];
+		break;
+	case MTTG_TRAV_NFP_SPEC:
+		trav->curr = trav->curr->next;
+		if (trav->curr != trav->head)
+			break;
+		/* fallthru, _stop will unlock */
+	default:
+		return NULL;
+	}
 
-	mutex_lock(&xt[af].mutex);
-	return seq_list_start(&xt[af].match, *pos);
+	if (ppos != NULL)
+		++*ppos;
+	return trav;
 }
 
-static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
+    bool is_target)
 {
-	struct proc_dir_entry *pde = (struct proc_dir_entry *)seq->private;
-	u_int16_t af = (unsigned long)pde->data;
+	struct nf_mttg_trav *trav = seq->private;
+	unsigned int j;
 
-	return seq_list_next(v, &xt[af].match, pos);
+	trav->class = MTTG_TRAV_INIT;
+	for (j = 0; j < *pos; ++j)
+		if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
+			return NULL;
+	return trav;
 }
 
-static void xt_match_seq_stop(struct seq_file *seq, void *v)
+static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
 {
-	struct proc_dir_entry *pde = seq->private;
-	u_int16_t af = (unsigned long)pde->data;
+	struct nf_mttg_trav *trav = seq->private;
+
+	switch (trav->class) {
+	case MTTG_TRAV_NFP_UNSPEC:
+		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
+		break;
+	case MTTG_TRAV_NFP_SPEC:
+		mutex_unlock(&xt[trav->nfproto].mutex);
+		break;
+	}
+}
 
-	mutex_unlock(&xt[af].mutex);
+static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	return xt_mttg_seq_start(seq, pos, false);
 }
 
-static int xt_match_seq_show(struct seq_file *seq, void *v)
+static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
 {
-	struct xt_match *match = list_entry(v, struct xt_match, list);
+	return xt_mttg_seq_next(seq, v, ppos, false);
+}
 
-	if (strlen(match->name))
-		return seq_printf(seq, "%s\n", match->name);
-	else
-		return 0;
+static int xt_match_seq_show(struct seq_file *seq, void *v)
+{
+	const struct nf_mttg_trav *trav = seq->private;
+	const struct xt_match *match;
+
+	switch (trav->class) {
+	case MTTG_TRAV_NFP_UNSPEC:
+	case MTTG_TRAV_NFP_SPEC:
+		if (trav->curr == trav->head)
+			return 0;
+		match = list_entry(trav->curr, struct xt_match, list);
+		return (*match->name == '\0') ? 0 :
+		       seq_printf(seq, "%s\n", match->name);
+	}
+	return 0;
 }
 
 static const struct seq_operations xt_match_seq_ops = {
 	.start	= xt_match_seq_start,
 	.next	= xt_match_seq_next,
-	.stop	= xt_match_seq_stop,
+	.stop	= xt_mttg_seq_stop,
 	.show	= xt_match_seq_show,
 };
 
 static int xt_match_open(struct inode *inode, struct file *file)
 {
+	struct seq_file *seq;
+	struct nf_mttg_trav *trav;
 	int ret;
 
-	ret = seq_open(file, &xt_match_seq_ops);
-	if (!ret) {
-		struct seq_file *seq = file->private_data;
+	trav = kmalloc(sizeof(*trav), GFP_KERNEL);
+	if (trav == NULL)
+		return -ENOMEM;
 
-		seq->private = PDE(inode);
+	ret = seq_open(file, &xt_match_seq_ops);
+	if (ret < 0) {
+		kfree(trav);
+		return ret;
 	}
-	return ret;
+
+	seq = file->private_data;
+	seq->private = trav;
+	trav->nfproto = (unsigned long)PDE(inode)->data;
+	return 0;
 }
 
 static const struct file_operations xt_match_ops = {
@@ -887,62 +971,63 @@ static const struct file_operations xt_match_ops = {
 	.open	 = xt_match_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
-	.release = seq_release,
+	.release = seq_release_private,
 };
 
 static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
 {
-	struct proc_dir_entry *pde = (struct proc_dir_entry *)seq->private;
-	u_int16_t af = (unsigned long)pde->data;
-
-	mutex_lock(&xt[af].mutex);
-	return seq_list_start(&xt[af].target, *pos);
+	return xt_mttg_seq_start(seq, pos, true);
 }
 
-static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
 {
-	struct proc_dir_entry *pde = (struct proc_dir_entry *)seq->private;
-	u_int16_t af = (unsigned long)pde->data;
-
-	return seq_list_next(v, &xt[af].target, pos);
-}
-
-static void xt_target_seq_stop(struct seq_file *seq, void *v)
-{
-	struct proc_dir_entry *pde = seq->private;
-	u_int16_t af = (unsigned long)pde->data;
-
-	mutex_unlock(&xt[af].mutex);
+	return xt_mttg_seq_next(seq, v, ppos, true);
 }
 
 static int xt_target_seq_show(struct seq_file *seq, void *v)
 {
-	struct xt_target *target = list_entry(v, struct xt_target, list);
-
-	if (strlen(target->name))
-		return seq_printf(seq, "%s\n", target->name);
-	else
-		return 0;
+	const struct nf_mttg_trav *trav = seq->private;
+	const struct xt_target *target;
+
+	switch (trav->class) {
+	case MTTG_TRAV_NFP_UNSPEC:
+	case MTTG_TRAV_NFP_SPEC:
+		if (trav->curr == trav->head)
+			return 0;
+		target = list_entry(trav->curr, struct xt_target, list);
+		return (*target->name == '\0') ? 0 :
+		       seq_printf(seq, "%s\n", target->name);
+	}
+	return 0;
 }
 
 static const struct seq_operations xt_target_seq_ops = {
 	.start	= xt_target_seq_start,
 	.next	= xt_target_seq_next,
-	.stop	= xt_target_seq_stop,
+	.stop	= xt_mttg_seq_stop,
 	.show	= xt_target_seq_show,
 };
 
 static int xt_target_open(struct inode *inode, struct file *file)
 {
+	struct seq_file *seq;
+	struct nf_mttg_trav *trav;
 	int ret;
 
-	ret = seq_open(file, &xt_target_seq_ops);
-	if (!ret) {
-		struct seq_file *seq = file->private_data;
+	trav = kmalloc(sizeof(*trav), GFP_KERNEL);
+	if (trav == NULL)
+		return -ENOMEM;
 
-		seq->private = PDE(inode);
+	ret = seq_open(file, &xt_target_seq_ops);
+	if (ret < 0) {
+		kfree(trav);
+		return ret;
 	}
-	return ret;
+
+	seq = file->private_data;
+	seq->private = trav;
+	trav->nfproto = (unsigned long)PDE(inode)->data;
+	return 0;
 }
 
 static const struct file_operations xt_target_ops = {
@@ -950,7 +1035,7 @@ static const struct file_operations xt_target_ops = {
 	.open	 = xt_target_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
-	.release = seq_release,
+	.release = seq_release_private,
 };
 
 #define FORMAT_TABLES "_tables_names"
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index fe80b614a400..791e030ea903 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -542,7 +542,7 @@ recent_mt_proc_write(struct file *file, const char __user *input,
 	struct recent_entry *e;
 	char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:5afe:c0de")];
 	const char *c = buf;
-	union nf_inet_addr addr;
+	union nf_inet_addr addr = {};
 	u_int16_t family;
 	bool add, succ;
 
diff --git a/net/netfilter/xt_sctp.c b/net/netfilter/xt_sctp.c
index e223cb43ae8e..a189ada9128f 100644
--- a/net/netfilter/xt_sctp.c
+++ b/net/netfilter/xt_sctp.c
@@ -105,7 +105,7 @@ match_packet(const struct sk_buff *skb,
 
 	switch (chunk_match_type) {
 	case SCTP_CHUNK_MATCH_ALL:
-		return SCTP_CHUNKMAP_IS_CLEAR(info->chunkmap);
+		return SCTP_CHUNKMAP_IS_CLEAR(chunkmapcopy);
 	case SCTP_CHUNK_MATCH_ANY:
 		return false;
 	case SCTP_CHUNK_MATCH_ONLY:
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index 6a91a32a80c1..4aa888584d20 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -207,7 +207,6 @@ static int gprs_xmit(struct sk_buff *skb, struct net_device *dev)
 			dev->name, err);
 		dev->stats.tx_aborted_errors++;
 		dev->stats.tx_errors++;
-		dev_kfree_skb(skb);
 	} else {
 		dev->stats.tx_packets++;
 		dev->stats.tx_bytes += len;
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index bb3e67849b38..8ad2b5333881 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -553,7 +553,7 @@ static int pep_do_rcv(struct sock *sk, struct sk_buff *skb)
 {
 	struct pep_sock *pn = pep_sk(sk);
 	struct sock *sknode;
-	struct pnpipehdr *hdr = pnp_hdr(skb);
+	struct pnpipehdr *hdr;
 	struct sockaddr_pn dst;
 	int err = NET_RX_SUCCESS;
 	u8 pipe_handle;
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index d7d2bed7a699..eac5e7bb7365 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -284,13 +284,13 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
 		if (IS_ERR(trans)) {
 			call = ERR_CAST(trans);
 			trans = NULL;
-			goto out;
+			goto out_notrans;
 		}
 	} else {
 		trans = rx->trans;
 		if (!trans) {
 			call = ERR_PTR(-ENOTCONN);
-			goto out;
+			goto out_notrans;
 		}
 		atomic_inc(&trans->usage);
 	}
@@ -315,6 +315,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
 	rxrpc_put_bundle(trans, bundle);
 out:
 	rxrpc_put_transport(trans);
+out_notrans:
 	release_sock(&rx->sk);
 	_leave(" = %p", call);
 	return call;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index f6b4fa97df70..e36e94ab4e10 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -66,11 +66,15 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 {
 	struct drr_sched *q = qdisc_priv(sch);
 	struct drr_class *cl = (struct drr_class *)*arg;
+	struct nlattr *opt = tca[TCA_OPTIONS];
 	struct nlattr *tb[TCA_DRR_MAX + 1];
 	u32 quantum;
 	int err;
 
-	err = nla_parse_nested(tb, TCA_DRR_MAX, tca[TCA_OPTIONS], drr_policy);
+	if (!opt)
+		return -EINVAL;
+
+	err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy);
 	if (err < 0)
 		return err;
 
diff --git a/net/wimax/id-table.c b/net/wimax/id-table.c
index 5e685f7eda90..72273abfcb16 100644
--- a/net/wimax/id-table.c
+++ b/net/wimax/id-table.c
@@ -94,12 +94,13 @@ struct wimax_dev *wimax_dev_get_by_genl_info(
 	list_for_each_entry(wimax_dev, &wimax_id_table, id_table_node) {
 		if (wimax_dev->net_dev->ifindex == ifindex) {
 			dev_hold(wimax_dev->net_dev);
-			break;
+			goto found;
 		}
 	}
-	if (wimax_dev == NULL)
+	wimax_dev = NULL;
 	d_printf(1, NULL, "wimax: no devices found with ifindex %d\n",
 		 ifindex);
+found:
 	spin_unlock(&wimax_id_table_lock);
 	d_fnend(3, NULL, "(info %p ifindex %d) = %p\n",
 		info, ifindex, wimax_dev);