Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_core.c                             10
-rw-r--r--  net/core/dev.c                                     6
-rw-r--r--  net/core/net_namespace.c                          86
-rw-r--r--  net/core/skbuff.c                                  8
-rw-r--r--  net/core/sock.c                                    3
-rw-r--r--  net/ipv4/tcp_input.c                               9
-rw-r--r--  net/ipv4/tcp_output.c                              1
-rw-r--r--  net/ipv4/tcp_scalable.c                            2
-rw-r--r--  net/ipv6/inet6_hashtables.c                        4
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c     5
-rw-r--r--  net/netfilter/nfnetlink_log.c                      8
-rw-r--r--  net/netfilter/x_tables.c                         199
-rw-r--r--  net/netfilter/xt_recent.c                          2
-rw-r--r--  net/sched/sch_drr.c                                6
14 files changed, 236 insertions(+), 113 deletions(-)
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index e9db889d6222..2886d2fb9ab5 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -1,12 +1,16 @@
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 #include <linux/if_vlan.h>
+#include <linux/netpoll.h>
 #include "vlan.h"
 
 /* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */
 int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
                       u16 vlan_tci, int polling)
 {
+        if (netpoll_rx(skb))
+                return NET_RX_DROP;
+
         if (skb_bond_should_drop(skb))
                 goto drop;
 
@@ -100,6 +104,9 @@ int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
 {
         int err = NET_RX_SUCCESS;
 
+        if (netpoll_receive_skb(skb))
+                return NET_RX_DROP;
+
         switch (vlan_gro_common(napi, grp, vlan_tci, skb)) {
         case -1:
                 return netif_receive_skb(skb);
@@ -126,6 +133,9 @@ int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
         if (!skb)
                 goto out;
 
+        if (netpoll_receive_skb(skb))
+                goto out;
+
         err = NET_RX_SUCCESS;
 
         switch (vlan_gro_common(napi, grp, vlan_tci, skb)) {
diff --git a/net/core/dev.c b/net/core/dev.c
index a17e00662363..72b0d26fd46d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2488,6 +2488,9 @@ static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 
 int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
+        if (netpoll_receive_skb(skb))
+                return NET_RX_DROP;
+
         switch (__napi_gro_receive(napi, skb)) {
         case -1:
                 return netif_receive_skb(skb);
@@ -2558,6 +2561,9 @@ int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
         if (!skb)
                 goto out;
 
+        if (netpoll_receive_skb(skb))
+                goto out;
+
         err = NET_RX_SUCCESS;
 
         switch (__napi_gro_receive(napi, skb)) {
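Note: the change common to vlan_core.c and dev.c is an early netpoll guard: before a packet enters the GRO or VLAN receive paths, netpoll_rx()/netpoll_receive_skb() gets a chance to consume it, and consumed packets are dropped from the normal path before any GRO state is touched. Below is a minimal userspace sketch of this guard-at-entry pattern; it is illustrative only, and all names in it (pkt, under_netpoll, gro_receive) are hypothetical, not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

enum { RX_SUCCESS, RX_DROP };

struct pkt {
        bool netpoll_owned;     /* stand-in for "netpoll is active here" */
        const char *data;
};

/* Stand-in for netpoll_rx()/netpoll_receive_skb(): returns true when the
 * out-of-band path consumes the packet, so the normal path must drop it. */
static bool under_netpoll(const struct pkt *p)
{
        return p->netpoll_owned;
}

static int gro_receive(struct pkt *p)
{
        if (under_netpoll(p))           /* guard before any GRO bookkeeping */
                return RX_DROP;
        printf("GRO path handles: %s\n", p->data);
        return RX_SUCCESS;
}

int main(void)
{
        struct pkt a = { false, "normal frame" };
        struct pkt b = { true, "netconsole frame" };

        printf("a -> %d, b -> %d\n", gro_receive(&a), gro_receive(&b));
        return 0;
}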
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 55151faaf90c..2adb1a7d361f 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -32,24 +32,14 @@ static __net_init int setup_net(struct net *net)
 {
         /* Must be called with net_mutex held */
         struct pernet_operations *ops;
-        int error;
-        struct net_generic *ng;
+        int error = 0;
 
         atomic_set(&net->count, 1);
+
 #ifdef NETNS_REFCNT_DEBUG
         atomic_set(&net->use_count, 0);
 #endif
 
-        error = -ENOMEM;
-        ng = kzalloc(sizeof(struct net_generic) +
-                     INITIAL_NET_GEN_PTRS * sizeof(void *), GFP_KERNEL);
-        if (ng == NULL)
-                goto out;
-
-        ng->len = INITIAL_NET_GEN_PTRS;
-        rcu_assign_pointer(net->gen, ng);
-
-        error = 0;
         list_for_each_entry(ops, &pernet_list, list) {
                 if (ops->init) {
                         error = ops->init(net);
@@ -70,24 +60,50 @@ out_undo:
         }
 
         rcu_barrier();
-        kfree(ng);
         goto out;
 }
 
+static struct net_generic *net_alloc_generic(void)
+{
+        struct net_generic *ng;
+        size_t generic_size = sizeof(struct net_generic) +
+                              INITIAL_NET_GEN_PTRS * sizeof(void *);
+
+        ng = kzalloc(generic_size, GFP_KERNEL);
+        if (ng)
+                ng->len = INITIAL_NET_GEN_PTRS;
+
+        return ng;
+}
+
 #ifdef CONFIG_NET_NS
 static struct kmem_cache *net_cachep;
 static struct workqueue_struct *netns_wq;
 
 static struct net *net_alloc(void)
 {
-        return kmem_cache_zalloc(net_cachep, GFP_KERNEL);
+        struct net *net = NULL;
+        struct net_generic *ng;
+
+        ng = net_alloc_generic();
+        if (!ng)
+                goto out;
+
+        net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
+        if (!net)
+                goto out_free;
+
+        rcu_assign_pointer(net->gen, ng);
+out:
+        return net;
+
+out_free:
+        kfree(ng);
+        goto out;
 }
 
 static void net_free(struct net *net)
 {
-        if (!net)
-                return;
-
 #ifdef NETNS_REFCNT_DEBUG
         if (unlikely(atomic_read(&net->use_count) != 0)) {
                 printk(KERN_EMERG "network namespace not free! Usage: %d\n",
@@ -112,27 +128,28 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net)
         err = -ENOMEM;
         new_net = net_alloc();
         if (!new_net)
-                goto out;
+                goto out_err;
 
         mutex_lock(&net_mutex);
         err = setup_net(new_net);
-        if (err)
-                goto out_unlock;
-
-        rtnl_lock();
-        list_add_tail(&new_net->list, &net_namespace_list);
-        rtnl_unlock();
-
-
-out_unlock:
+        if (!err) {
+                rtnl_lock();
+                list_add_tail(&new_net->list, &net_namespace_list);
+                rtnl_unlock();
+        }
         mutex_unlock(&net_mutex);
+
+        if (err)
+                goto out_free;
 out:
         put_net(old_net);
-        if (err) {
-                net_free(new_net);
-                new_net = ERR_PTR(err);
-        }
         return new_net;
+
+out_free:
+        net_free(new_net);
+out_err:
+        new_net = ERR_PTR(err);
+        goto out;
 }
 
 static void cleanup_net(struct work_struct *work)
@@ -188,6 +205,7 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net)
 
 static int __init net_ns_init(void)
 {
+        struct net_generic *ng;
         int err;
 
         printk(KERN_INFO "net_namespace: %zd bytes\n", sizeof(struct net));
@@ -202,6 +220,12 @@ static int __init net_ns_init(void)
                 panic("Could not create netns workq");
 #endif
 
+        ng = net_alloc_generic();
+        if (!ng)
+                panic("Could not allocate generic netns");
+
+        rcu_assign_pointer(init_net.gen, ng);
+
         mutex_lock(&net_mutex);
         err = setup_net(&init_net);
 
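Note: the net_namespace.c change factors the net_generic allocation out into net_alloc_generic() and moves it from setup_net() into net_alloc() and net_ns_init(), so both init_net and dynamically created namespaces have net->gen published before any pernet init code runs, and each failure path frees exactly what was allocated. Below is a minimal userspace sketch of this allocate-then-publish ordering with goto-based unwinding; the ns/generic types and names are illustrative, not the kernel's.

#include <stdlib.h>

struct generic { size_t len; void *ptr[8]; };
struct ns { struct generic *gen; };

static struct ns *ns_alloc(void)
{
        struct ns *ns = NULL;
        struct generic *g;

        g = calloc(1, sizeof(*g));      /* auxiliary array first */
        if (!g)
                goto out;
        g->len = 8;

        ns = calloc(1, sizeof(*ns));    /* then the main object */
        if (!ns)
                goto out_free;

        ns->gen = g;    /* publish only after both allocations succeed */
out:
        return ns;

out_free:
        free(g);        /* unwind the one thing allocated so far */
        goto out;
}

int main(void)
{
        struct ns *ns = ns_alloc();

        if (ns) {
                free(ns->gen);
                free(ns);
        }
        return 0;
}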
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index da74b844f4ea..c6a6b166f8d6 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -143,14 +143,6 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
         BUG();
 }
 
-void skb_truesize_bug(struct sk_buff *skb)
-{
-        WARN(net_ratelimit(), KERN_ERR "SKB BUG: Invalid truesize (%u) "
-             "len=%u, sizeof(sk_buff)=%Zd\n",
-             skb->truesize, skb->len, sizeof(struct sk_buff));
-}
-EXPORT_SYMBOL(skb_truesize_bug);
-
 /* Allocate a new skbuff. We do this ourselves so we can fill in a few
  * 'private' fields and also do memory statistics to find all the
  * [BEEP] leaks.
diff --git a/net/core/sock.c b/net/core/sock.c
index 6f2e1337975d..5f97caa158e8 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -696,7 +696,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
         if (len < 0)
                 return -EINVAL;
 
-        v.val = 0;
+        memset(&v, 0, sizeof(v));
 
         switch(optname) {
         case SO_DEBUG:
@@ -1137,7 +1137,6 @@ void sock_rfree(struct sk_buff *skb)
 {
         struct sock *sk = skb->sk;
 
-        skb_truesize_check(skb);
         atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
         sk_mem_uncharge(skb->sk, skb->truesize);
 }
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a6961d75c7ea..c28976a7e596 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1374,7 +1374,8 @@ static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
 
 static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
                            struct tcp_sacktag_state *state,
-                           unsigned int pcount, int shifted, int mss)
+                           unsigned int pcount, int shifted, int mss,
+                           int dup_sack)
 {
         struct tcp_sock *tp = tcp_sk(sk);
         struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
@@ -1410,7 +1411,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
         }
 
         /* We discard results */
-        tcp_sacktag_one(skb, sk, state, 0, pcount);
+        tcp_sacktag_one(skb, sk, state, dup_sack, pcount);
 
         /* Difference in this won't matter, both ACKed by the same cumul. ACK */
         TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
@@ -1561,7 +1562,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 
         if (!skb_shift(prev, skb, len))
                 goto fallback;
-        if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss))
+        if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
                 goto out;
 
         /* Hole filled allows collapsing with the next as well, this is very
@@ -1580,7 +1581,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
         len = skb->len;
         if (skb_shift(prev, skb, len)) {
                 pcount += tcp_skb_pcount(skb);
-                tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss);
+                tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0);
         }
 
 out:
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index dda42f0bd7a3..da2c3b8794f2 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2023,7 +2023,6 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
                 last_lost = tp->snd_una;
         }
 
-        /* First pass: retransmit lost packets. */
         tcp_for_write_queue_from(skb, sk) {
                 __u8 sacked = TCP_SKB_CB(skb)->sacked;
 
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 2747ec7bfb63..4660b088a8ce 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -1,6 +1,6 @@
 /* Tom Kelly's Scalable TCP
  *
- * See htt://www-lce.eng.cam.ac.uk/~ctk21/scalable/
+ * See http://www.deneholme.net/tom/scalable/
  *
  * John Heffner <jheffner@sc.edu>
  */
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 8fe267feb81e..1bcc3431859e 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -258,11 +258,11 @@ unique:
 
         if (twp != NULL) {
                 *twp = tw;
-                NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITRECYCLED);
+                NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
         } else if (tw != NULL) {
                 /* Silly. Should hash-dance instead... */
                 inet_twsk_deschedule(tw, death_row);
-                NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITRECYCLED);
+                NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
 
                 inet_twsk_put(tw);
         }
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index c323643ffcf9..72dbb6d1a6b3 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -201,8 +201,9 @@ icmpv6_error(struct net *net, struct sk_buff *skb, unsigned int dataoff,
 
         if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
             nf_ip6_checksum(skb, hooknum, dataoff, IPPROTO_ICMPV6)) {
-                nf_log_packet(PF_INET6, 0, skb, NULL, NULL, NULL,
-                              "nf_ct_icmpv6: ICMPv6 checksum failed\n");
+                if (LOG_INVALID(net, IPPROTO_ICMPV6))
+                        nf_log_packet(PF_INET6, 0, skb, NULL, NULL, NULL,
+                                      "nf_ct_icmpv6: ICMPv6 checksum failed ");
                 return -NF_ACCEPT;
         }
 
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index fa49dc7fe100..c712e9fc6bba 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -39,7 +39,7 @@
 #endif
 
 #define NFULNL_NLBUFSIZ_DEFAULT  NLMSG_GOODSIZE
-#define NFULNL_TIMEOUT_DEFAULT   HZ      /* every second */
+#define NFULNL_TIMEOUT_DEFAULT   100     /* every second */
 #define NFULNL_QTHRESH_DEFAULT   100     /* 100 packets */
 #define NFULNL_COPY_RANGE_MAX    0xFFFF  /* max packet size is limited by 16-bit struct nfattr nfa_len field */
 
@@ -590,8 +590,10 @@ nfulnl_log_packet(u_int8_t pf,
 
         qthreshold = inst->qthreshold;
         /* per-rule qthreshold overrides per-instance */
-        if (qthreshold > li->u.ulog.qthreshold)
-                qthreshold = li->u.ulog.qthreshold;
+        if (li->u.ulog.qthreshold)
+                if (qthreshold > li->u.ulog.qthreshold)
+                        qthreshold = li->u.ulog.qthreshold;
+
 
         switch (inst->copy_mode) {
         case NFULNL_COPY_META:
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index bfbf521f6ea5..5baccfa5a0de 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -827,59 +827,143 @@ static const struct file_operations xt_table_ops = {
         .release = seq_release_net,
 };
 
-static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
+/*
+ * Traverse state for ip{,6}_{tables,matches} for helping crossing
+ * the multi-AF mutexes.
+ */
+struct nf_mttg_trav {
+        struct list_head *head, *curr;
+        uint8_t class, nfproto;
+};
+
+enum {
+        MTTG_TRAV_INIT,
+        MTTG_TRAV_NFP_UNSPEC,
+        MTTG_TRAV_NFP_SPEC,
+        MTTG_TRAV_DONE,
+};
+
+static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
+    bool is_target)
 {
-        struct proc_dir_entry *pde = (struct proc_dir_entry *)seq->private;
-        u_int16_t af = (unsigned long)pde->data;
+        static const uint8_t next_class[] = {
+                [MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
+                [MTTG_TRAV_NFP_SPEC]   = MTTG_TRAV_DONE,
+        };
+        struct nf_mttg_trav *trav = seq->private;
+
+        switch (trav->class) {
+        case MTTG_TRAV_INIT:
+                trav->class = MTTG_TRAV_NFP_UNSPEC;
+                mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
+                trav->head = trav->curr = is_target ?
+                        &xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
+                break;
+        case MTTG_TRAV_NFP_UNSPEC:
+                trav->curr = trav->curr->next;
+                if (trav->curr != trav->head)
+                        break;
+                mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
+                mutex_lock(&xt[trav->nfproto].mutex);
+                trav->head = trav->curr = is_target ?
+                        &xt[trav->nfproto].target : &xt[trav->nfproto].match;
+                trav->class = next_class[trav->class];
+                break;
+        case MTTG_TRAV_NFP_SPEC:
+                trav->curr = trav->curr->next;
+                if (trav->curr != trav->head)
+                        break;
+                /* fallthru, _stop will unlock */
+        default:
+                return NULL;
+        }
 
-        mutex_lock(&xt[af].mutex);
-        return seq_list_start(&xt[af].match, *pos);
+        if (ppos != NULL)
+                ++*ppos;
+        return trav;
 }
 
-static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
+    bool is_target)
 {
-        struct proc_dir_entry *pde = (struct proc_dir_entry *)seq->private;
-        u_int16_t af = (unsigned long)pde->data;
+        struct nf_mttg_trav *trav = seq->private;
+        unsigned int j;
 
-        return seq_list_next(v, &xt[af].match, pos);
+        trav->class = MTTG_TRAV_INIT;
+        for (j = 0; j < *pos; ++j)
+                if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
+                        return NULL;
+        return trav;
 }
 
-static void xt_match_seq_stop(struct seq_file *seq, void *v)
+static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
 {
-        struct proc_dir_entry *pde = seq->private;
-        u_int16_t af = (unsigned long)pde->data;
+        struct nf_mttg_trav *trav = seq->private;
+
+        switch (trav->class) {
+        case MTTG_TRAV_NFP_UNSPEC:
+                mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
+                break;
+        case MTTG_TRAV_NFP_SPEC:
+                mutex_unlock(&xt[trav->nfproto].mutex);
+                break;
+        }
+}
 
-        mutex_unlock(&xt[af].mutex);
+static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
+{
+        return xt_mttg_seq_start(seq, pos, false);
 }
 
-static int xt_match_seq_show(struct seq_file *seq, void *v)
+static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
 {
-        struct xt_match *match = list_entry(v, struct xt_match, list);
+        return xt_mttg_seq_next(seq, v, ppos, false);
+}
 
-        if (strlen(match->name))
-                return seq_printf(seq, "%s\n", match->name);
-        else
-                return 0;
+static int xt_match_seq_show(struct seq_file *seq, void *v)
+{
+        const struct nf_mttg_trav *trav = seq->private;
+        const struct xt_match *match;
+
+        switch (trav->class) {
+        case MTTG_TRAV_NFP_UNSPEC:
+        case MTTG_TRAV_NFP_SPEC:
+                if (trav->curr == trav->head)
+                        return 0;
+                match = list_entry(trav->curr, struct xt_match, list);
+                return (*match->name == '\0') ? 0 :
+                       seq_printf(seq, "%s\n", match->name);
+        }
+        return 0;
 }
 
 static const struct seq_operations xt_match_seq_ops = {
         .start  = xt_match_seq_start,
         .next   = xt_match_seq_next,
-        .stop   = xt_match_seq_stop,
+        .stop   = xt_mttg_seq_stop,
         .show   = xt_match_seq_show,
 };
 
 static int xt_match_open(struct inode *inode, struct file *file)
 {
+        struct seq_file *seq;
+        struct nf_mttg_trav *trav;
         int ret;
 
-        ret = seq_open(file, &xt_match_seq_ops);
-        if (!ret) {
-                struct seq_file *seq = file->private_data;
+        trav = kmalloc(sizeof(*trav), GFP_KERNEL);
+        if (trav == NULL)
+                return -ENOMEM;
 
-                seq->private = PDE(inode);
+        ret = seq_open(file, &xt_match_seq_ops);
+        if (ret < 0) {
+                kfree(trav);
+                return ret;
         }
-        return ret;
+
+        seq = file->private_data;
+        seq->private = trav;
+        trav->nfproto = (unsigned long)PDE(inode)->data;
+        return 0;
 }
 
 static const struct file_operations xt_match_ops = {
@@ -887,62 +971,63 @@ static const struct file_operations xt_match_ops = {
         .open    = xt_match_open,
         .read    = seq_read,
         .llseek  = seq_lseek,
-        .release = seq_release,
+        .release = seq_release_private,
 };
 
 static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
 {
-        struct proc_dir_entry *pde = (struct proc_dir_entry *)seq->private;
-        u_int16_t af = (unsigned long)pde->data;
-
-        mutex_lock(&xt[af].mutex);
-        return seq_list_start(&xt[af].target, *pos);
+        return xt_mttg_seq_start(seq, pos, true);
 }
 
-static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
 {
-        struct proc_dir_entry *pde = (struct proc_dir_entry *)seq->private;
-        u_int16_t af = (unsigned long)pde->data;
-
-        return seq_list_next(v, &xt[af].target, pos);
-}
-
-static void xt_target_seq_stop(struct seq_file *seq, void *v)
-{
-        struct proc_dir_entry *pde = seq->private;
-        u_int16_t af = (unsigned long)pde->data;
-
-        mutex_unlock(&xt[af].mutex);
+        return xt_mttg_seq_next(seq, v, ppos, true);
 }
 
 static int xt_target_seq_show(struct seq_file *seq, void *v)
 {
-        struct xt_target *target = list_entry(v, struct xt_target, list);
-
-        if (strlen(target->name))
-                return seq_printf(seq, "%s\n", target->name);
-        else
-                return 0;
+        const struct nf_mttg_trav *trav = seq->private;
+        const struct xt_target *target;
+
+        switch (trav->class) {
+        case MTTG_TRAV_NFP_UNSPEC:
+        case MTTG_TRAV_NFP_SPEC:
+                if (trav->curr == trav->head)
+                        return 0;
+                target = list_entry(trav->curr, struct xt_target, list);
+                return (*target->name == '\0') ? 0 :
+                       seq_printf(seq, "%s\n", target->name);
+        }
+        return 0;
 }
 
 static const struct seq_operations xt_target_seq_ops = {
         .start  = xt_target_seq_start,
         .next   = xt_target_seq_next,
-        .stop   = xt_target_seq_stop,
+        .stop   = xt_mttg_seq_stop,
         .show   = xt_target_seq_show,
 };
 
 static int xt_target_open(struct inode *inode, struct file *file)
 {
+        struct seq_file *seq;
+        struct nf_mttg_trav *trav;
         int ret;
 
-        ret = seq_open(file, &xt_target_seq_ops);
-        if (!ret) {
-                struct seq_file *seq = file->private_data;
+        trav = kmalloc(sizeof(*trav), GFP_KERNEL);
+        if (trav == NULL)
+                return -ENOMEM;
 
-                seq->private = PDE(inode);
+        ret = seq_open(file, &xt_target_seq_ops);
+        if (ret < 0) {
+                kfree(trav);
+                return ret;
         }
-        return ret;
+
+        seq = file->private_data;
+        seq->private = trav;
+        trav->nfproto = (unsigned long)PDE(inode)->data;
+        return 0;
 }
 
 static const struct file_operations xt_target_ops = {
@@ -950,7 +1035,7 @@ static const struct file_operations xt_target_ops = {
         .open    = xt_target_open,
         .read    = seq_read,
         .llseek  = seq_lseek,
-        .release = seq_release,
+        .release = seq_release_private,
 };
 
 #define FORMAT_TABLES "_tables_names"
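Note: the x_tables.c change replaces the single-list seq_list_start()/seq_list_next() helpers with a small state machine (struct nf_mttg_trav) that walks the NFPROTO_UNSPEC list first and then the per-protocol list, taking and releasing each list's mutex as it crosses over; xt_mttg_seq_stop() then unlocks whichever mutex is still held. Below is a minimal userspace sketch of the two-phase cursor (locking omitted; list contents and all names are made up, and NULL-terminated arrays stand in for the kernel's circular lists).

#include <stdio.h>

enum trav_class { TRAV_INIT, TRAV_A, TRAV_B, TRAV_DONE };

struct trav {
        const char **curr;      /* cursor into a NULL-terminated array */
        enum trav_class class;
};

/* Two lists standing in for xt[NFPROTO_UNSPEC].match and xt[nfproto].match. */
static const char *list_a[] = { "conntrack", "mark", NULL };
static const char *list_b[] = { "tcp", "udp", NULL };

static const char *trav_next(struct trav *t)
{
        switch (t->class) {
        case TRAV_INIT:
                t->class = TRAV_A;
                t->curr = list_a;       /* kernel: lock list A's mutex here */
                break;
        case TRAV_A:
                if (*++t->curr)
                        break;
                t->class = TRAV_B;      /* A exhausted: unlock A, lock B */
                t->curr = list_b;
                break;
        case TRAV_B:
                if (*++t->curr)
                        break;
                t->class = TRAV_DONE;   /* kernel: _stop unlocks B */
                /* fall through */
        default:
                return NULL;
        }
        return *t->curr;
}

int main(void)
{
        struct trav t = { .class = TRAV_INIT };

        /* Prints conntrack, mark, tcp, udp: one iteration spans both lists. */
        for (const char *name = trav_next(&t); name; name = trav_next(&t))
                puts(name);
        return 0;
}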
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index fe80b614a400..791e030ea903 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -542,7 +542,7 @@ recent_mt_proc_write(struct file *file, const char __user *input,
         struct recent_entry *e;
         char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:5afe:c0de")];
         const char *c = buf;
-        union nf_inet_addr addr;
+        union nf_inet_addr addr = {};
         u_int16_t family;
         bool add, succ;
 
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index f6b4fa97df70..e36e94ab4e10 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -66,11 +66,15 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 {
         struct drr_sched *q = qdisc_priv(sch);
         struct drr_class *cl = (struct drr_class *)*arg;
+        struct nlattr *opt = tca[TCA_OPTIONS];
         struct nlattr *tb[TCA_DRR_MAX + 1];
         u32 quantum;
         int err;
 
-        err = nla_parse_nested(tb, TCA_DRR_MAX, tca[TCA_OPTIONS], drr_policy);
+        if (!opt)
+                return -EINVAL;
+
+        err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy);
         if (err < 0)
                 return err;
 