author    David S. Miller <davem@davemloft.net>  2010-06-15 16:49:24 -0400
committer David S. Miller <davem@davemloft.net>  2010-06-15 16:49:24 -0400
commit    16fb62b6b4d57339a0ec931b3fb8c8d0ca6414e8 (patch)
tree      a1041342f31a626baf3a08d09d5c81a65dd8ef28
parent    a3433f35a55f7604742cae620c6dc6edfc70db6a (diff)
parent    f9181f4ffc71d7b7dd1906c9a11d51d6659220ae (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-next-2.6
-rw-r--r--  include/linux/netfilter/Kbuild                   |   1
-rw-r--r--  include/linux/netfilter/nf_conntrack_common.h    |   4
-rw-r--r--  include/linux/netfilter/nfnetlink_log.h          |   1
-rw-r--r--  include/linux/netfilter/xt_IDLETIMER.h           |  45
-rw-r--r--  include/net/netfilter/nf_conntrack.h             |  11
-rw-r--r--  include/net/netfilter/nf_conntrack_core.h        |   2
-rw-r--r--  include/net/netfilter/xt_rateest.h               |  10
-rw-r--r--  net/bridge/br_netfilter.c                        |   6
-rw-r--r--  net/ipv4/netfilter.c                             |   4
-rw-r--r--  net/ipv4/netfilter/arp_tables.c                  |   7
-rw-r--r--  net/ipv4/netfilter/ip_queue.c                    |  57
-rw-r--r--  net/ipv4/netfilter/ip_tables.c                   |   4
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c               |  48
-rw-r--r--  net/ipv4/netfilter/nf_nat_core.c                 |   2
-rw-r--r--  net/ipv4/netfilter/nf_nat_standalone.c           |   2
-rw-r--r--  net/ipv6/netfilter.c                             |   4
-rw-r--r--  net/ipv6/netfilter/ip6_queue.c                   |  57
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c                  |   7
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c   |   2
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c          |   9
-rw-r--r--  net/netfilter/Kconfig                            |  12
-rw-r--r--  net/netfilter/Makefile                           |   1
-rw-r--r--  net/netfilter/nf_conntrack_core.c                |  40
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c             |   2
-rw-r--r--  net/netfilter/nfnetlink_log.c                    |  67
-rw-r--r--  net/netfilter/nfnetlink_queue.c                  |  33
-rw-r--r--  net/netfilter/xt_CT.c                            |   4
-rw-r--r--  net/netfilter/xt_IDLETIMER.c                     | 314
-rw-r--r--  net/netfilter/xt_NOTRACK.c                       |   2
-rw-r--r--  net/netfilter/xt_TEE.c                           |   4
-rw-r--r--  net/netfilter/xt_cluster.c                       |   2
-rw-r--r--  net/netfilter/xt_conntrack.c                     |  11
-rw-r--r--  net/netfilter/xt_sctp.c                          |   3
-rw-r--r--  net/netfilter/xt_socket.c                        |   2
-rw-r--r--  net/netfilter/xt_state.c                         |  14
-rw-r--r--  net/netfilter/xt_statistic.c                     |  19
36 files changed, 608 insertions(+), 205 deletions(-)
diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild
index 48767cd16453..bb103f43afa0 100644
--- a/include/linux/netfilter/Kbuild
+++ b/include/linux/netfilter/Kbuild
@@ -8,6 +8,7 @@ header-y += xt_CONNMARK.h
 header-y += xt_CONNSECMARK.h
 header-y += xt_CT.h
 header-y += xt_DSCP.h
+header-y += xt_IDLETIMER.h
 header-y += xt_LED.h
 header-y += xt_MARK.h
 header-y += xt_NFLOG.h
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index 14e6d32002c4..1afd18c855ec 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -76,6 +76,10 @@ enum ip_conntrack_status {
 	/* Conntrack is a template */
 	IPS_TEMPLATE_BIT = 11,
 	IPS_TEMPLATE = (1 << IPS_TEMPLATE_BIT),
+
+	/* Conntrack is a fake untracked entry */
+	IPS_UNTRACKED_BIT = 12,
+	IPS_UNTRACKED = (1 << IPS_UNTRACKED_BIT),
 };
 
 /* Connection tracking event types */
diff --git a/include/linux/netfilter/nfnetlink_log.h b/include/linux/netfilter/nfnetlink_log.h
index d3bab7a2c9b7..1d0b84aa1d42 100644
--- a/include/linux/netfilter/nfnetlink_log.h
+++ b/include/linux/netfilter/nfnetlink_log.h
@@ -89,6 +89,7 @@ enum nfulnl_attr_config {
 #define NFULNL_COPY_NONE	0x00
 #define NFULNL_COPY_META	0x01
 #define NFULNL_COPY_PACKET	0x02
+#define NFULNL_COPY_DISABLED	0x03
 
 #define NFULNL_CFG_F_SEQ	0x0001
 #define NFULNL_CFG_F_SEQ_GLOBAL	0x0002
diff --git a/include/linux/netfilter/xt_IDLETIMER.h b/include/linux/netfilter/xt_IDLETIMER.h
new file mode 100644
index 000000000000..3e1aa1be942e
--- /dev/null
+++ b/include/linux/netfilter/xt_IDLETIMER.h
@@ -0,0 +1,45 @@
+/*
+ * linux/include/linux/netfilter/xt_IDLETIMER.h
+ *
+ * Header file for Xtables timer target module.
+ *
+ * Copyright (C) 2004, 2010 Nokia Corporation
+ * Written by Timo Teras <ext-timo.teras@nokia.com>
+ *
+ * Converted to x_tables and forward-ported to 2.6.34
+ * by Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef _XT_IDLETIMER_H
+#define _XT_IDLETIMER_H
+
+#include <linux/types.h>
+
+#define MAX_IDLETIMER_LABEL_SIZE 28
+
+struct idletimer_tg_info {
+	__u32 timeout;
+
+	char label[MAX_IDLETIMER_LABEL_SIZE];
+
+	/* for kernel module internal use only */
+	struct idletimer_tg *timer __attribute((aligned(8)));
+};
+
+#endif
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index bbfdd9453087..e624dae54fa4 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -257,7 +257,12 @@ extern s16 (*nf_ct_nat_offset)(const struct nf_conn *ct,
 			       u32 seq);
 
 /* Fake conntrack entry for untracked connections */
-extern struct nf_conn nf_conntrack_untracked;
+DECLARE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
+static inline struct nf_conn *nf_ct_untracked_get(void)
+{
+	return &__raw_get_cpu_var(nf_conntrack_untracked);
+}
+extern void nf_ct_untracked_status_or(unsigned long bits);
 
 /* Iterate over all conntracks: if iter returns true, it's deleted. */
 extern void
@@ -285,9 +290,9 @@ static inline int nf_ct_is_dying(struct nf_conn *ct)
 	return test_bit(IPS_DYING_BIT, &ct->status);
 }
 
-static inline int nf_ct_is_untracked(const struct sk_buff *skb)
+static inline int nf_ct_is_untracked(const struct nf_conn *ct)
 {
-	return (skb->nfct == &nf_conntrack_untracked.ct_general);
+	return test_bit(IPS_UNTRACKED_BIT, &ct->status);
 }
 
 extern int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
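
The two hunks above carry the core API change of this merge: the single global nf_conntrack_untracked object becomes a per-cpu variable reached through nf_ct_untracked_get(), and "untracked" is now encoded as a status bit (IPS_UNTRACKED) rather than a pointer identity. A minimal sketch of the caller-side migration, condensed from the conversions later in this diff (see nf_nat_standalone.c):

	/* before: identity test against one global instance */
	if (ct == &nf_conntrack_untracked)
		return NF_ACCEPT;

	/* after: test the status bit; true for any of the per-cpu copies */
	if (nf_ct_is_untracked(ct))
		return NF_ACCEPT;
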
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 3d7524fba194..aced085132e7 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -60,7 +60,7 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
 	struct nf_conn *ct = (struct nf_conn *)skb->nfct;
 	int ret = NF_ACCEPT;
 
-	if (ct && ct != &nf_conntrack_untracked) {
+	if (ct && !nf_ct_is_untracked(ct)) {
 		if (!nf_ct_is_confirmed(ct))
 			ret = __nf_conntrack_confirm(skb);
 		if (likely(ret == NF_ACCEPT))
diff --git a/include/net/netfilter/xt_rateest.h b/include/net/netfilter/xt_rateest.h
index 5e1427795928..5a2978d1cb22 100644
--- a/include/net/netfilter/xt_rateest.h
+++ b/include/net/netfilter/xt_rateest.h
@@ -2,13 +2,17 @@
 #define _XT_RATEEST_H
 
 struct xt_rateest {
+	/* keep lock and bstats on same cache line to speedup xt_rateest_tg() */
+	struct gnet_stats_basic_packed	bstats;
+	spinlock_t			lock;
+	/* keep rstats and lock on same cache line to speedup xt_rateest_mt() */
+	struct gnet_stats_rate_est	rstats;
+
+	/* following fields not accessed in hot path */
 	struct hlist_node		list;
 	char				name[IFNAMSIZ];
 	unsigned int			refcnt;
-	spinlock_t			lock;
 	struct gnet_estimator		params;
-	struct gnet_stats_rate_est	rstats;
-	struct gnet_stats_basic_packed	bstats;
 	struct rcu_head			rcu;
 };
 
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index f54404ddee53..84060bc48f11 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -245,8 +245,7 @@ static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
 		kfree_skb(skb);
 		return 0;
 	}
-	dst_hold(&rt->dst);
-	skb_dst_set(skb, &rt->dst);
+	skb_dst_set_noref(skb, &rt->dst);
 
 	skb->dev = nf_bridge->physindev;
 	nf_bridge_update_protocol(skb);
@@ -397,8 +396,7 @@ bridged_dnat:
 			kfree_skb(skb);
 			return 0;
 		}
-		dst_hold(&rt->dst);
-		skb_dst_set(skb, &rt->dst);
+		skb_dst_set_noref(skb, &rt->dst);
 	}
 
 	skb->dev = nf_bridge->physindev;
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index cfbc79af21c3..d88a46c54fd1 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -212,9 +212,7 @@ static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
 		skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, protocol,
 					       skb->len - dataoff, 0);
 		skb->ip_summed = CHECKSUM_NONE;
-		csum = __skb_checksum_complete_head(skb, dataoff + len);
-		if (!csum)
-			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		return __skb_checksum_complete_head(skb, dataoff + len);
 	}
 	return csum;
 }
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 1ac01b128621..16c0ba0a2728 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -758,7 +758,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
 	 * about).
 	 */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc_node(countersize, numa_node_id());
+	counters = vmalloc(countersize);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1005,8 +1005,7 @@ static int __do_replace(struct net *net, const char *name,
 	struct arpt_entry *iter;
 
 	ret = 0;
-	counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
-				numa_node_id());
+	counters = vmalloc(num_counters * sizeof(struct xt_counters));
 	if (!counters) {
 		ret = -ENOMEM;
 		goto out;
@@ -1159,7 +1158,7 @@ static int do_add_counters(struct net *net, const void __user *user,
 	if (len != size + num_counters * sizeof(struct xt_counters))
 		return -EINVAL;
 
-	paddc = vmalloc_node(len - size, numa_node_id());
+	paddc = vmalloc(len - size);
 	if (!paddc)
 		return -ENOMEM;
 
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index a4e5fc5df4bf..d2c1311cb28d 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -42,7 +42,7 @@ typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long);
 
 static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
 static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
-static DEFINE_RWLOCK(queue_lock);
+static DEFINE_SPINLOCK(queue_lock);
 static int peer_pid __read_mostly;
 static unsigned int copy_range __read_mostly;
 static unsigned int queue_total;
@@ -72,10 +72,10 @@ __ipq_set_mode(unsigned char mode, unsigned int range)
 		break;
 
 	case IPQ_COPY_PACKET:
-		copy_mode = mode;
+		if (range > 0xFFFF)
+			range = 0xFFFF;
 		copy_range = range;
-		if (copy_range > 0xFFFF)
-			copy_range = 0xFFFF;
+		copy_mode = mode;
 		break;
 
 	default:
@@ -101,7 +101,7 @@ ipq_find_dequeue_entry(unsigned long id)
 {
 	struct nf_queue_entry *entry = NULL, *i;
 
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 
 	list_for_each_entry(i, &queue_list, list) {
 		if ((unsigned long)i == id) {
@@ -115,7 +115,7 @@ ipq_find_dequeue_entry(unsigned long id)
 			queue_total--;
 	}
 
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return entry;
 }
 
@@ -136,9 +136,9 @@ __ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
 static void
 ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
 {
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 	__ipq_flush(cmpfn, data);
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 }
 
 static struct sk_buff *
@@ -152,9 +152,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
 	struct nlmsghdr *nlh;
 	struct timeval tv;
 
-	read_lock_bh(&queue_lock);
-
-	switch (copy_mode) {
+	switch (ACCESS_ONCE(copy_mode)) {
 	case IPQ_COPY_META:
 	case IPQ_COPY_NONE:
 		size = NLMSG_SPACE(sizeof(*pmsg));
@@ -162,26 +160,21 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
 
 	case IPQ_COPY_PACKET:
 		if (entry->skb->ip_summed == CHECKSUM_PARTIAL &&
-		    (*errp = skb_checksum_help(entry->skb))) {
-			read_unlock_bh(&queue_lock);
+		    (*errp = skb_checksum_help(entry->skb)))
 			return NULL;
-		}
-		if (copy_range == 0 || copy_range > entry->skb->len)
+
+		data_len = ACCESS_ONCE(copy_range);
+		if (data_len == 0 || data_len > entry->skb->len)
 			data_len = entry->skb->len;
-		else
-			data_len = copy_range;
 
 		size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
 		break;
 
 	default:
 		*errp = -EINVAL;
-		read_unlock_bh(&queue_lock);
 		return NULL;
 	}
 
-	read_unlock_bh(&queue_lock);
-
 	skb = alloc_skb(size, GFP_ATOMIC);
 	if (!skb)
 		goto nlmsg_failure;
@@ -242,7 +235,7 @@ ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 	if (nskb == NULL)
 		return status;
 
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 
 	if (!peer_pid)
 		goto err_out_free_nskb;
@@ -266,14 +259,14 @@ ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 
 	__ipq_enqueue_entry(entry);
 
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return status;
 
 err_out_free_nskb:
 	kfree_skb(nskb);
 
 err_out_unlock:
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return status;
 }
 
@@ -342,9 +335,9 @@ ipq_set_mode(unsigned char mode, unsigned int range)
 {
 	int status;
 
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 	status = __ipq_set_mode(mode, range);
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return status;
 }
 
@@ -440,11 +433,11 @@ __ipq_rcv_skb(struct sk_buff *skb)
 	if (security_netlink_recv(skb, CAP_NET_ADMIN))
 		RCV_SKB_FAIL(-EPERM);
 
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 
 	if (peer_pid) {
 		if (peer_pid != pid) {
-			write_unlock_bh(&queue_lock);
+			spin_unlock_bh(&queue_lock);
 			RCV_SKB_FAIL(-EBUSY);
 		}
 	} else {
@@ -452,7 +445,7 @@ __ipq_rcv_skb(struct sk_buff *skb)
 		peer_pid = pid;
 	}
 
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 
 	status = ipq_receive_peer(NLMSG_DATA(nlh), type,
 				  nlmsglen - NLMSG_LENGTH(0));
@@ -497,10 +490,10 @@ ipq_rcv_nl_event(struct notifier_block *this,
 	struct netlink_notify *n = ptr;
 
 	if (event == NETLINK_URELEASE && n->protocol == NETLINK_FIREWALL) {
-		write_lock_bh(&queue_lock);
+		spin_lock_bh(&queue_lock);
 		if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid))
 			__ipq_reset();
-		write_unlock_bh(&queue_lock);
+		spin_unlock_bh(&queue_lock);
 	}
 	return NOTIFY_DONE;
 }
@@ -527,7 +520,7 @@ static ctl_table ipq_table[] = {
 #ifdef CONFIG_PROC_FS
 static int ip_queue_show(struct seq_file *m, void *v)
 {
-	read_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 
 	seq_printf(m,
 		   "Peer PID          : %d\n"
@@ -545,7 +538,7 @@ static int ip_queue_show(struct seq_file *m, void *v)
 		   queue_dropped,
 		   queue_user_dropped);
 
-	read_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return 0;
 }
 
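
Across ip_queue.c the rwlock becomes a plain spinlock and the packet-path reader, ipq_build_packet_message(), stops taking the lock entirely: copy_mode and copy_range are sampled with ACCESS_ONCE(). To keep that sane, __ipq_set_mode() now clamps the range before publishing it and stores copy_mode last, so a lockless reader that sees IPQ_COPY_PACKET should never pair it with an unclamped range. A standalone userspace sketch of the idiom, with illustrative names rather than kernel API (ACCESS_ONCE here is the kernel's definition: a volatile cast that forces exactly one load):

	#include <stdio.h>

	#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

	static unsigned char copy_mode;		/* 2 == "copy packet" */
	static unsigned int copy_range;

	/* writer: clamp, publish the range, then the mode that enables it */
	static void set_mode(unsigned char mode, unsigned int range)
	{
		if (range > 0xFFFF)
			range = 0xFFFF;
		copy_range = range;
		copy_mode = mode;
	}

	/* lockless reader: sample each shared variable exactly once */
	static unsigned int copy_len(unsigned int pkt_len)
	{
		unsigned int data_len = 0;

		if (ACCESS_ONCE(copy_mode) == 2) {
			data_len = ACCESS_ONCE(copy_range);
			if (data_len == 0 || data_len > pkt_len)
				data_len = pkt_len;
		}
		return data_len;
	}

	int main(void)
	{
		set_mode(2, 0x20000);			/* clamped to 0xFFFF */
		printf("%u\n", copy_len(1500));		/* 1500 */
		printf("%u\n", copy_len(90000));	/* 65535 */
		return 0;
	}
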
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 4b6c5ca610fc..b38c11810c65 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -928,7 +928,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
 	   (other than comefrom, which userspace doesn't care
 	   about). */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc_node(countersize, numa_node_id());
+	counters = vmalloc(countersize);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1352,7 +1352,7 @@ do_add_counters(struct net *net, const void __user *user,
 	if (len != size + num_counters * sizeof(struct xt_counters))
 		return -EINVAL;
 
-	paddc = vmalloc_node(len - size, numa_node_id());
+	paddc = vmalloc(len - size);
 	if (!paddc)
 		return -ENOMEM;
 
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index f91c94b9a790..64d0875f5192 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -53,12 +53,13 @@ struct clusterip_config {
 #endif
 	enum clusterip_hashmode hash_mode;	/* which hashing mode */
 	u_int32_t hash_initval;			/* hash initialization */
+	struct rcu_head rcu;
 };
 
 static LIST_HEAD(clusterip_configs);
 
 /* clusterip_lock protects the clusterip_configs list */
-static DEFINE_RWLOCK(clusterip_lock);
+static DEFINE_SPINLOCK(clusterip_lock);
 
 #ifdef CONFIG_PROC_FS
 static const struct file_operations clusterip_proc_fops;
@@ -71,11 +72,17 @@ clusterip_config_get(struct clusterip_config *c)
 	atomic_inc(&c->refcount);
 }
 
+
+static void clusterip_config_rcu_free(struct rcu_head *head)
+{
+	kfree(container_of(head, struct clusterip_config, rcu));
+}
+
 static inline void
 clusterip_config_put(struct clusterip_config *c)
 {
 	if (atomic_dec_and_test(&c->refcount))
-		kfree(c);
+		call_rcu_bh(&c->rcu, clusterip_config_rcu_free);
 }
 
 /* decrease the count of entries using/referencing this config.  If last
@@ -84,10 +91,11 @@ clusterip_config_put(struct clusterip_config *c)
 static inline void
 clusterip_config_entry_put(struct clusterip_config *c)
 {
-	write_lock_bh(&clusterip_lock);
-	if (atomic_dec_and_test(&c->entries)) {
-		list_del(&c->list);
-		write_unlock_bh(&clusterip_lock);
+	local_bh_disable();
+	if (atomic_dec_and_lock(&c->entries, &clusterip_lock)) {
+		list_del_rcu(&c->list);
+		spin_unlock(&clusterip_lock);
+		local_bh_enable();
 
 		dev_mc_del(c->dev, c->clustermac);
 		dev_put(c->dev);
@@ -100,7 +108,7 @@ clusterip_config_entry_put(struct clusterip_config *c)
 #endif
 		return;
 	}
-	write_unlock_bh(&clusterip_lock);
+	local_bh_enable();
 }
 
 static struct clusterip_config *
@@ -108,7 +116,7 @@ __clusterip_config_find(__be32 clusterip)
 {
 	struct clusterip_config *c;
 
-	list_for_each_entry(c, &clusterip_configs, list) {
+	list_for_each_entry_rcu(c, &clusterip_configs, list) {
 		if (c->clusterip == clusterip)
 			return c;
 	}
@@ -121,16 +129,15 @@ clusterip_config_find_get(__be32 clusterip, int entry)
 {
 	struct clusterip_config *c;
 
-	read_lock_bh(&clusterip_lock);
+	rcu_read_lock_bh();
 	c = __clusterip_config_find(clusterip);
-	if (!c) {
-		read_unlock_bh(&clusterip_lock);
-		return NULL;
+	if (c) {
+		if (unlikely(!atomic_inc_not_zero(&c->refcount)))
+			c = NULL;
+		else if (entry)
+			atomic_inc(&c->entries);
 	}
-	atomic_inc(&c->refcount);
-	if (entry)
-		atomic_inc(&c->entries);
-	read_unlock_bh(&clusterip_lock);
+	rcu_read_unlock_bh();
 
 	return c;
 }
@@ -181,9 +188,9 @@ clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
 	}
 #endif
 
-	write_lock_bh(&clusterip_lock);
-	list_add(&c->list, &clusterip_configs);
-	write_unlock_bh(&clusterip_lock);
+	spin_lock_bh(&clusterip_lock);
+	list_add_rcu(&c->list, &clusterip_configs);
+	spin_unlock_bh(&clusterip_lock);
 
 	return c;
 }
@@ -733,6 +740,9 @@ static void __exit clusterip_tg_exit(void)
 #endif
 	nf_unregister_hook(&cip_arp_ops);
 	xt_unregister_target(&clusterip_tg_reg);
+
+	/* Wait for completion of call_rcu_bh()'s (clusterip_config_rcu_free) */
+	rcu_barrier_bh();
 }
 
 module_init(clusterip_tg_init);
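
The CLUSTERIP conversion is the standard RCU lookup-with-refcount pattern: readers walk the list under rcu_read_lock_bh() and may only take a reference via atomic_inc_not_zero(), since an entry can reach refcount zero while still visible to them; the final put defers the kfree() through call_rcu_bh(), and module exit calls rcu_barrier_bh() so no callback outlives the module. Schematically (condensed from the hunks above, not a new API):

	rcu_read_lock_bh();
	c = __clusterip_config_find(clusterip);	/* list_for_each_entry_rcu() */
	if (c && unlikely(!atomic_inc_not_zero(&c->refcount)))
		c = NULL;	/* raced with the last clusterip_config_put() */
	rcu_read_unlock_bh();
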
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 4f8bddb760c9..c7719b283ada 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -742,7 +742,7 @@ static int __init nf_nat_init(void)
 	spin_unlock_bh(&nf_nat_lock);
 
 	/* Initialize fake conntrack so that NAT will skip it */
-	nf_conntrack_untracked.status |= IPS_NAT_DONE_MASK;
+	nf_ct_untracked_status_or(IPS_NAT_DONE_MASK);
 
 	l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);
 
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
index beb25819c9c9..6723c682250d 100644
--- a/net/ipv4/netfilter/nf_nat_standalone.c
+++ b/net/ipv4/netfilter/nf_nat_standalone.c
@@ -98,7 +98,7 @@ nf_nat_fn(unsigned int hooknum,
 		return NF_ACCEPT;
 
 	/* Don't try to NAT if this packet is not conntracked */
-	if (ct == &nf_conntrack_untracked)
+	if (nf_ct_is_untracked(ct))
 		return NF_ACCEPT;
 
 	nat = nfct_nat(ct);
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index a74951c039b6..7155b2451d7c 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -151,9 +151,7 @@ static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
 							 protocol,
 							 csum_sub(0, hsum)));
 		skb->ip_summed = CHECKSUM_NONE;
-		csum = __skb_checksum_complete_head(skb, dataoff + len);
-		if (!csum)
-			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		return __skb_checksum_complete_head(skb, dataoff + len);
 	}
 	return csum;
 };
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 8c201743d96d..413ab0754e1f 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -43,7 +43,7 @@ typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long);
 
 static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
 static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
-static DEFINE_RWLOCK(queue_lock);
+static DEFINE_SPINLOCK(queue_lock);
 static int peer_pid __read_mostly;
 static unsigned int copy_range __read_mostly;
 static unsigned int queue_total;
@@ -73,10 +73,10 @@ __ipq_set_mode(unsigned char mode, unsigned int range)
 		break;
 
 	case IPQ_COPY_PACKET:
-		copy_mode = mode;
+		if (range > 0xFFFF)
+			range = 0xFFFF;
 		copy_range = range;
-		if (copy_range > 0xFFFF)
-			copy_range = 0xFFFF;
+		copy_mode = mode;
 		break;
 
 	default:
@@ -102,7 +102,7 @@ ipq_find_dequeue_entry(unsigned long id)
 {
 	struct nf_queue_entry *entry = NULL, *i;
 
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 
 	list_for_each_entry(i, &queue_list, list) {
 		if ((unsigned long)i == id) {
@@ -116,7 +116,7 @@ ipq_find_dequeue_entry(unsigned long id)
 			queue_total--;
 	}
 
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return entry;
 }
 
@@ -137,9 +137,9 @@ __ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
 static void
 ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
 {
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 	__ipq_flush(cmpfn, data);
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 }
 
 static struct sk_buff *
@@ -153,9 +153,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
 	struct nlmsghdr *nlh;
 	struct timeval tv;
 
-	read_lock_bh(&queue_lock);
-
-	switch (copy_mode) {
+	switch (ACCESS_ONCE(copy_mode)) {
 	case IPQ_COPY_META:
 	case IPQ_COPY_NONE:
 		size = NLMSG_SPACE(sizeof(*pmsg));
@@ -163,26 +161,21 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
 
 	case IPQ_COPY_PACKET:
 		if (entry->skb->ip_summed == CHECKSUM_PARTIAL &&
-		    (*errp = skb_checksum_help(entry->skb))) {
-			read_unlock_bh(&queue_lock);
+		    (*errp = skb_checksum_help(entry->skb)))
 			return NULL;
-		}
-		if (copy_range == 0 || copy_range > entry->skb->len)
+
+		data_len = ACCESS_ONCE(copy_range);
+		if (data_len == 0 || data_len > entry->skb->len)
 			data_len = entry->skb->len;
-		else
-			data_len = copy_range;
 
 		size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
 		break;
 
 	default:
 		*errp = -EINVAL;
-		read_unlock_bh(&queue_lock);
 		return NULL;
 	}
 
-	read_unlock_bh(&queue_lock);
-
 	skb = alloc_skb(size, GFP_ATOMIC);
 	if (!skb)
 		goto nlmsg_failure;
@@ -242,7 +235,7 @@ ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 	if (nskb == NULL)
 		return status;
 
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 
 	if (!peer_pid)
 		goto err_out_free_nskb;
@@ -266,14 +259,14 @@ ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 
 	__ipq_enqueue_entry(entry);
 
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return status;
 
 err_out_free_nskb:
 	kfree_skb(nskb);
 
 err_out_unlock:
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return status;
 }
 
@@ -342,9 +335,9 @@ ipq_set_mode(unsigned char mode, unsigned int range)
 {
 	int status;
 
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 	status = __ipq_set_mode(mode, range);
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return status;
 }
 
@@ -441,11 +434,11 @@ __ipq_rcv_skb(struct sk_buff *skb)
 	if (security_netlink_recv(skb, CAP_NET_ADMIN))
 		RCV_SKB_FAIL(-EPERM);
 
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 
 	if (peer_pid) {
 		if (peer_pid != pid) {
-			write_unlock_bh(&queue_lock);
+			spin_unlock_bh(&queue_lock);
 			RCV_SKB_FAIL(-EBUSY);
 		}
 	} else {
@@ -453,7 +446,7 @@ __ipq_rcv_skb(struct sk_buff *skb)
 		peer_pid = pid;
 	}
 
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 
 	status = ipq_receive_peer(NLMSG_DATA(nlh), type,
 				  nlmsglen - NLMSG_LENGTH(0));
@@ -498,10 +491,10 @@ ipq_rcv_nl_event(struct notifier_block *this,
 	struct netlink_notify *n = ptr;
 
 	if (event == NETLINK_URELEASE && n->protocol == NETLINK_IP6_FW) {
-		write_lock_bh(&queue_lock);
+		spin_lock_bh(&queue_lock);
 		if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid))
 			__ipq_reset();
-		write_unlock_bh(&queue_lock);
+		spin_unlock_bh(&queue_lock);
 	}
 	return NOTIFY_DONE;
 }
@@ -528,7 +521,7 @@ static ctl_table ipq_table[] = {
 #ifdef CONFIG_PROC_FS
 static int ip6_queue_show(struct seq_file *m, void *v)
 {
-	read_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 
 	seq_printf(m,
 		   "Peer PID          : %d\n"
@@ -546,7 +539,7 @@ static int ip6_queue_show(struct seq_file *m, void *v)
 		   queue_dropped,
 		   queue_user_dropped);
 
-	read_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return 0;
 }
 
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 9d2d68f0e605..dc41d6d3c6c6 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -943,7 +943,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
 	   (other than comefrom, which userspace doesn't care
 	   about). */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc_node(countersize, numa_node_id());
+	counters = vmalloc(countersize);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1213,8 +1213,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
 	struct ip6t_entry *iter;
 
 	ret = 0;
-	counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
-				numa_node_id());
+	counters = vmalloc(num_counters * sizeof(struct xt_counters));
 	if (!counters) {
 		ret = -ENOMEM;
 		goto out;
@@ -1368,7 +1367,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
 	if (len != size + num_counters * sizeof(struct xt_counters))
 		return -EINVAL;
 
-	paddc = vmalloc_node(len - size, numa_node_id());
+	paddc = vmalloc(len - size);
 	if (!paddc)
 		return -ENOMEM;
 
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 9be81776415e..1df3c8b6bf47 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -208,7 +208,7 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl,
 	type = icmp6h->icmp6_type - 130;
 	if (type >= 0 && type < sizeof(noct_valid_new) &&
 	    noct_valid_new[type]) {
-		skb->nfct = &nf_conntrack_untracked.ct_general;
+		skb->nfct = &nf_ct_untracked_get()->ct_general;
 		skb->nfctinfo = IP_CT_NEW;
 		nf_conntrack_get(skb->nfct);
 		return NF_ACCEPT;
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 6fb890187de0..9254008602d4 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -114,10 +114,8 @@ static void nf_skb_free(struct sk_buff *skb)
 }
 
 /* Memory Tracking Functions. */
-static inline void frag_kfree_skb(struct sk_buff *skb, unsigned int *work)
+static void frag_kfree_skb(struct sk_buff *skb)
 {
-	if (work)
-		*work -= skb->truesize;
 	atomic_sub(skb->truesize, &nf_init_frags.mem);
 	nf_skb_free(skb);
 	kfree_skb(skb);
@@ -335,7 +333,7 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
 				fq->q.fragments = next;
 
 			fq->q.meat -= free_it->len;
-			frag_kfree_skb(free_it, NULL);
+			frag_kfree_skb(free_it);
 		}
 	}
 
@@ -442,7 +440,6 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
 	skb_shinfo(head)->frag_list = head->next;
 	skb_reset_transport_header(head);
 	skb_push(head, head->data - skb_network_header(head));
-	atomic_sub(head->truesize, &nf_init_frags.mem);
 
 	for (fp=head->next; fp; fp = fp->next) {
 		head->data_len += fp->len;
@@ -452,8 +449,8 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
 		else if (head->ip_summed == CHECKSUM_COMPLETE)
 			head->csum = csum_add(head->csum, fp->csum);
 		head->truesize += fp->truesize;
-		atomic_sub(fp->truesize, &nf_init_frags.mem);
 	}
+	atomic_sub(head->truesize, &nf_init_frags.mem);
 
 	head->next = NULL;
 	head->dev = dev;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 8593a77cfea9..413ed24a968a 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -424,6 +424,18 @@ config NETFILTER_XT_TARGET_HL
 	  since you can easily create immortal packets that loop
 	  forever on the network.
 
+config NETFILTER_XT_TARGET_IDLETIMER
+	tristate "IDLETIMER target support"
+	depends on NETFILTER_ADVANCED
+	help
+
+	  This option adds the `IDLETIMER' target.  Each matching packet
+	  resets the timer associated with label specified when the rule is
+	  added.  When the timer expires, it triggers a sysfs notification.
+	  The remaining time for expiration can be read via sysfs.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
 config NETFILTER_XT_TARGET_LED
 	tristate '"LED" target support'
 	depends on LEDS_CLASS && LEDS_TRIGGERS
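
For context, the userspace side of the new target (not part of this kernel patch; option names per the matching iptables extension): a rule such as `iptables -t mangle -A PREROUTING -i wlan0 -j IDLETIMER --timeout 60 --label wlan0` arms a 60-second timer that every matching packet resets, and the time remaining until expiry can be read back from the per-label sysfs file the module creates (under /sys/class/xt_idletimer/timers/ in this implementation).
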
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 14e3a8fd8180..e28420aac5ef 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_TCPMSS) += xt_TCPMSS.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP) += xt_TCPOPTSTRIP.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_TEE) += xt_TEE.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_TRACE) += xt_TRACE.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_IDLETIMER) += xt_IDLETIMER.o
 
 # matches
 obj-$(CONFIG_NETFILTER_XT_MATCH_CLUSTER) += xt_cluster.o
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 77288980fae0..16b41b4e2a3c 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -62,8 +62,8 @@ EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
 unsigned int nf_conntrack_max __read_mostly;
 EXPORT_SYMBOL_GPL(nf_conntrack_max);
 
-struct nf_conn nf_conntrack_untracked __read_mostly;
-EXPORT_SYMBOL_GPL(nf_conntrack_untracked);
+DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
+EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
 
 static int nf_conntrack_hash_rnd_initted;
 static unsigned int nf_conntrack_hash_rnd;
@@ -1181,10 +1181,21 @@ static void nf_ct_release_dying_list(struct net *net)
 	spin_unlock_bh(&nf_conntrack_lock);
 }
 
+static int untrack_refs(void)
+{
+	int cnt = 0, cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
+
+		cnt += atomic_read(&ct->ct_general.use) - 1;
+	}
+	return cnt;
+}
+
 static void nf_conntrack_cleanup_init_net(void)
 {
-	/* wait until all references to nf_conntrack_untracked are dropped */
-	while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
+	while (untrack_refs() > 0)
 		schedule();
 
 	nf_conntrack_helper_fini();
@@ -1319,10 +1330,19 @@ EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
 module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
 		  &nf_conntrack_htable_size, 0600);
 
+void nf_ct_untracked_status_or(unsigned long bits)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		per_cpu(nf_conntrack_untracked, cpu).status |= bits;
+}
+EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or);
+
 static int nf_conntrack_init_init_net(void)
 {
 	int max_factor = 8;
-	int ret;
+	int ret, cpu;
 
 	/* Idea from tcp.c: use 1/16384 of memory.  On i386: 32MB
 	 * machine has 512 buckets. >= 1GB machines have 16384 buckets. */
@@ -1361,11 +1381,13 @@ static int nf_conntrack_init_init_net(void)
 		goto err_extend;
 #endif
 	/* Set up fake conntrack: to never be deleted, not in any hashes */
-	write_pnet(&nf_conntrack_untracked.ct_net, &init_net);
-	atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
+	for_each_possible_cpu(cpu) {
+		struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
+		write_pnet(&ct->ct_net, &init_net);
+		atomic_set(&ct->ct_general.use, 1);
+	}
 	/*  - and look it like as a confirmed connection */
-	set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);
-
+	nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
 	return 0;
 
 #ifdef CONFIG_NF_CONNTRACK_ZONES
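
The reason for the per-cpu split (background, not stated in the hunks themselves): every NOTRACK'd packet takes and releases a reference on the untracked entry, so a single global refcount becomes a cache line bounced between all CPUs stamping such packets; with one entry per CPU, each CPU only ever touches its own copy, as in the icmpv6 hunk earlier where skb->nfct is taken from nf_ct_untracked_get(). Since each copy is initialized with use == 1, cleanup can no longer compare one counter against 1; untrack_refs() instead sums the extra references, atomic_read(&ct->ct_general.use) - 1, across all possible CPUs, and teardown waits for that sum to drop to zero.
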
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index c42ff6aa441d..5bae1cd15eea 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -480,7 +480,7 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
 	int err;
 
 	/* ignore our fake conntrack entry */
-	if (ct == &nf_conntrack_untracked)
+	if (nf_ct_is_untracked(ct))
 		return 0;
 
 	if (events & (1 << IPCT_DESTROY)) {
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index e0504e90a0f0..6a1572b0ab41 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -66,9 +66,10 @@ struct nfulnl_instance {
 	u_int16_t group_num;	/* number of this queue */
 	u_int16_t flags;
 	u_int8_t copy_mode;
+	struct rcu_head rcu;
 };
 
-static DEFINE_RWLOCK(instances_lock);
+static DEFINE_SPINLOCK(instances_lock);
 static atomic_t global_seq;
 
 #define INSTANCE_BUCKETS	16
@@ -88,7 +89,7 @@ __instance_lookup(u_int16_t group_num)
 	struct nfulnl_instance *inst;
 
 	head = &instance_table[instance_hashfn(group_num)];
-	hlist_for_each_entry(inst, pos, head, hlist) {
+	hlist_for_each_entry_rcu(inst, pos, head, hlist) {
 		if (inst->group_num == group_num)
 			return inst;
 	}
@@ -106,22 +107,26 @@ instance_lookup_get(u_int16_t group_num)
 {
 	struct nfulnl_instance *inst;
 
-	read_lock_bh(&instances_lock);
+	rcu_read_lock_bh();
 	inst = __instance_lookup(group_num);
-	if (inst)
-		instance_get(inst);
-	read_unlock_bh(&instances_lock);
+	if (inst && !atomic_inc_not_zero(&inst->use))
+		inst = NULL;
+	rcu_read_unlock_bh();
 
 	return inst;
 }
 
+static void nfulnl_instance_free_rcu(struct rcu_head *head)
+{
+	kfree(container_of(head, struct nfulnl_instance, rcu));
+	module_put(THIS_MODULE);
+}
+
 static void
 instance_put(struct nfulnl_instance *inst)
 {
-	if (inst && atomic_dec_and_test(&inst->use)) {
-		kfree(inst);
-		module_put(THIS_MODULE);
-	}
+	if (inst && atomic_dec_and_test(&inst->use))
+		call_rcu_bh(&inst->rcu, nfulnl_instance_free_rcu);
 }
 
 static void nfulnl_timer(unsigned long data);
@@ -132,7 +137,7 @@ instance_create(u_int16_t group_num, int pid)
 	struct nfulnl_instance *inst;
 	int err;
 
-	write_lock_bh(&instances_lock);
+	spin_lock_bh(&instances_lock);
 	if (__instance_lookup(group_num)) {
 		err = -EEXIST;
 		goto out_unlock;
@@ -166,32 +171,37 @@ instance_create(u_int16_t group_num, int pid)
 	inst->copy_mode = NFULNL_COPY_PACKET;
 	inst->copy_range = NFULNL_COPY_RANGE_MAX;
 
-	hlist_add_head(&inst->hlist,
-		       &instance_table[instance_hashfn(group_num)]);
+	hlist_add_head_rcu(&inst->hlist,
+			   &instance_table[instance_hashfn(group_num)]);
 
-	write_unlock_bh(&instances_lock);
+	spin_unlock_bh(&instances_lock);
 
 	return inst;
 
 out_unlock:
-	write_unlock_bh(&instances_lock);
+	spin_unlock_bh(&instances_lock);
 	return ERR_PTR(err);
 }
 
 static void __nfulnl_flush(struct nfulnl_instance *inst);
 
+/* called with BH disabled */
 static void
 __instance_destroy(struct nfulnl_instance *inst)
 {
 	/* first pull it out of the global list */
-	hlist_del(&inst->hlist);
+	hlist_del_rcu(&inst->hlist);
 
 	/* then flush all pending packets from skb */
 
-	spin_lock_bh(&inst->lock);
+	spin_lock(&inst->lock);
+
+	/* lockless readers wont be able to use us */
+	inst->copy_mode = NFULNL_COPY_DISABLED;
+
 	if (inst->skb)
 		__nfulnl_flush(inst);
-	spin_unlock_bh(&inst->lock);
+	spin_unlock(&inst->lock);
 
 	/* and finally put the refcount */
 	instance_put(inst);
@@ -200,9 +210,9 @@ __instance_destroy(struct nfulnl_instance *inst)
 static inline void
 instance_destroy(struct nfulnl_instance *inst)
 {
-	write_lock_bh(&instances_lock);
+	spin_lock_bh(&instances_lock);
 	__instance_destroy(inst);
-	write_unlock_bh(&instances_lock);
+	spin_unlock_bh(&instances_lock);
 }
 
 static int
@@ -621,6 +631,7 @@ nfulnl_log_packet(u_int8_t pf,
 		size += nla_total_size(data_len);
 		break;
 
+	case NFULNL_COPY_DISABLED:
 	default:
 		goto unlock_and_release;
 	}
@@ -674,7 +685,7 @@ nfulnl_rcv_nl_event(struct notifier_block *this,
 		int i;
 
 		/* destroy all instances for this pid */
-		write_lock_bh(&instances_lock);
+		spin_lock_bh(&instances_lock);
 		for (i = 0; i < INSTANCE_BUCKETS; i++) {
 			struct hlist_node *tmp, *t2;
 			struct nfulnl_instance *inst;
@@ -686,7 +697,7 @@ nfulnl_rcv_nl_event(struct notifier_block *this,
 				__instance_destroy(inst);
 			}
 		}
-		write_unlock_bh(&instances_lock);
+		spin_unlock_bh(&instances_lock);
 	}
 	return NOTIFY_DONE;
 }
@@ -863,19 +874,19 @@ static struct hlist_node *get_first(struct iter_state *st)
 
 	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
 		if (!hlist_empty(&instance_table[st->bucket]))
-			return instance_table[st->bucket].first;
+			return rcu_dereference_bh(instance_table[st->bucket].first);
 	}
 	return NULL;
 }
 
 static struct hlist_node *get_next(struct iter_state *st, struct hlist_node *h)
 {
-	h = h->next;
+	h = rcu_dereference_bh(h->next);
 	while (!h) {
 		if (++st->bucket >= INSTANCE_BUCKETS)
 			return NULL;
 
-		h = instance_table[st->bucket].first;
+		h = rcu_dereference_bh(instance_table[st->bucket].first);
 	}
 	return h;
 }
@@ -892,9 +903,9 @@ static struct hlist_node *get_idx(struct iter_state *st, loff_t pos)
 }
 
 static void *seq_start(struct seq_file *seq, loff_t *pos)
-	__acquires(instances_lock)
+	__acquires(rcu_bh)
 {
-	read_lock_bh(&instances_lock);
+	rcu_read_lock_bh();
 	return get_idx(seq->private, *pos);
 }
 
@@ -905,9 +916,9 @@ static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
 }
 
 static void seq_stop(struct seq_file *s, void *v)
-	__releases(instances_lock)
+	__releases(rcu_bh)
 {
-	read_unlock_bh(&instances_lock);
+	rcu_read_unlock_bh();
}
 
 static int seq_show(struct seq_file *s, void *v)
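
One subtlety in the log conversion above: once instances are found via RCU rather than under instances_lock, a logger can still reach an instance that __instance_destroy() is tearing down. The destroy path therefore flips copy_mode to the new NFULNL_COPY_DISABLED sentinel while holding inst->lock; nfulnl_log_packet(), which also runs under inst->lock, then lands in the NFULNL_COPY_DISABLED case of its switch and releases the instance instead of queueing to it.
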
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index cc3ae861e8f3..68e67d19724d 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -46,17 +46,19 @@ struct nfqnl_instance {
46 int peer_pid; 46 int peer_pid;
47 unsigned int queue_maxlen; 47 unsigned int queue_maxlen;
48 unsigned int copy_range; 48 unsigned int copy_range;
49 unsigned int queue_total;
50 unsigned int queue_dropped; 49 unsigned int queue_dropped;
51 unsigned int queue_user_dropped; 50 unsigned int queue_user_dropped;
52 51
53 unsigned int id_sequence; /* 'sequence' of pkt ids */
54 52
55 u_int16_t queue_num; /* number of this queue */ 53 u_int16_t queue_num; /* number of this queue */
56 u_int8_t copy_mode; 54 u_int8_t copy_mode;
57 55/*
58 spinlock_t lock; 56 * Following fields are dirtied for each queued packet,
59 57 * keep them in same cache line if possible.
58 */
59 spinlock_t lock;
60 unsigned int queue_total;
61 atomic_t id_sequence; /* 'sequence' of pkt ids */
60 struct list_head queue_list; /* packets in queue */ 62 struct list_head queue_list; /* packets in queue */
61}; 63};
62 64
@@ -238,32 +240,24 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
238 240
239 outdev = entry->outdev; 241 outdev = entry->outdev;
240 242
241 spin_lock_bh(&queue->lock); 243 switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) {
242
243 switch ((enum nfqnl_config_mode)queue->copy_mode) {
244 case NFQNL_COPY_META: 244 case NFQNL_COPY_META:
245 case NFQNL_COPY_NONE: 245 case NFQNL_COPY_NONE:
246 break; 246 break;
247 247
248 case NFQNL_COPY_PACKET: 248 case NFQNL_COPY_PACKET:
249 if (entskb->ip_summed == CHECKSUM_PARTIAL && 249 if (entskb->ip_summed == CHECKSUM_PARTIAL &&
250 skb_checksum_help(entskb)) { 250 skb_checksum_help(entskb))
251 spin_unlock_bh(&queue->lock);
252 return NULL; 251 return NULL;
253 } 252
254 if (queue->copy_range == 0 253 data_len = ACCESS_ONCE(queue->copy_range);
255 || queue->copy_range > entskb->len) 254 if (data_len == 0 || data_len > entskb->len)
256 data_len = entskb->len; 255 data_len = entskb->len;
257 else
258 data_len = queue->copy_range;
259 256
260 size += nla_total_size(data_len); 257 size += nla_total_size(data_len);
261 break; 258 break;
262 } 259 }
263 260
264 entry->id = queue->id_sequence++;
265
266 spin_unlock_bh(&queue->lock);
267 261
268 skb = alloc_skb(size, GFP_ATOMIC); 262 skb = alloc_skb(size, GFP_ATOMIC);
269 if (!skb) 263 if (!skb)
@@ -278,6 +272,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
278 nfmsg->version = NFNETLINK_V0; 272 nfmsg->version = NFNETLINK_V0;
279 nfmsg->res_id = htons(queue->queue_num); 273 nfmsg->res_id = htons(queue->queue_num);
280 274
275 entry->id = atomic_inc_return(&queue->id_sequence);
281 pmsg.packet_id = htonl(entry->id); 276 pmsg.packet_id = htonl(entry->id);
282 pmsg.hw_protocol = entskb->protocol; 277 pmsg.hw_protocol = entskb->protocol;
283 pmsg.hook = entry->hook; 278 pmsg.hook = entry->hook;
@@ -868,7 +863,7 @@ static int seq_show(struct seq_file *s, void *v)
                           inst->peer_pid, inst->queue_total,
                           inst->copy_mode, inst->copy_range,
                           inst->queue_dropped, inst->queue_user_dropped,
-                          inst->id_sequence, 1);
+                          atomic_read(&inst->id_sequence), 1);
 }
 
 static const struct seq_operations nfqnl_seq_ops = {
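Editor's note: the net effect of the hunks above is that nfqnl_build_packet_message() no longer takes queue->lock at all. copy_mode and copy_range are each sampled exactly once with ACCESS_ONCE(), so every later test sees one consistent value even if a config message changes the field concurrently, and packet ids now come from atomic_inc_return() with the /proc reader using atomic_read(). A minimal sketch of the read-once idiom (illustrative helper, not part of the patch):

    /* Sketch: snapshot a concurrently-writable field exactly once, so
     * the two uses below cannot observe two different values. */
    static unsigned int clamp_copy_range(const struct nfqnl_instance *q,
                                         unsigned int pkt_len)
    {
            unsigned int range = ACCESS_ONCE(q->copy_range);

            return (range == 0 || range > pkt_len) ? pkt_len : range;
    }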
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 562bf3266e04..0cb6053f02fd 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -67,7 +67,7 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par)
                 return -EINVAL;
 
         if (info->flags & XT_CT_NOTRACK) {
-                ct = &nf_conntrack_untracked;
+                ct = nf_ct_untracked_get();
                 atomic_inc(&ct->ct_general.use);
                 goto out;
         }
@@ -132,7 +132,7 @@ static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par)
         struct nf_conn *ct = info->ct;
         struct nf_conn_help *help;
 
-        if (ct != &nf_conntrack_untracked) {
+        if (!nf_ct_is_untracked(ct)) {
                 help = nfct_help(ct);
                 if (help)
                         module_put(help->helper->me);
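Editor's note: this and the similar hunks below (xt_NOTRACK, xt_TEE, xt_cluster, xt_conntrack, xt_socket, xt_state) stop comparing conntrack pointers against the global nf_conntrack_untracked object and go through accessors instead; untracked-ness is now a flag carried by the entry itself, which leaves the door open to replacing the single global with per-cpu instances. Roughly what the accessor pair looks like after this merge (paraphrased sketch, not quoted from the patch):

    /* Sketch: untracked is a status bit, not pointer identity. */
    static inline int nf_ct_is_untracked(const struct nf_conn *ct)
    {
            return test_bit(IPS_UNTRACKED_BIT, &ct->status);
    }

    static inline struct nf_conn *nf_ct_untracked_get(void)
    {
            return &nf_conntrack_untracked;   /* still one global here */
    }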
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
new file mode 100644
index 000000000000..e11090a0675c
--- /dev/null
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -0,0 +1,314 @@
+/*
+ * linux/net/netfilter/xt_IDLETIMER.c
+ *
+ * Netfilter module to trigger a timer when a packet matches.
+ * After the timer expires, a kevent will be sent.
+ *
+ * Copyright (C) 2004, 2010 Nokia Corporation
+ * Written by Timo Teras <ext-timo.teras@nokia.com>
+ *
+ * Converted to x_tables and reworked for upstream inclusion
+ * by Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_IDLETIMER.h>
+#include <linux/kobject.h>
+#include <linux/workqueue.h>
+#include <linux/sysfs.h>
+
+struct idletimer_tg_attr {
+        struct attribute attr;
+        ssize_t (*show)(struct kobject *kobj,
+                        struct attribute *attr, char *buf);
+};
+
+struct idletimer_tg {
+        struct list_head entry;
+        struct timer_list timer;
+        struct work_struct work;
+
+        struct kobject *kobj;
+        struct idletimer_tg_attr attr;
+
+        unsigned int refcnt;
+};
+
+static LIST_HEAD(idletimer_tg_list);
+static DEFINE_MUTEX(list_mutex);
+
+static struct kobject *idletimer_tg_kobj;
+
+static
+struct idletimer_tg *__idletimer_tg_find_by_label(const char *label)
+{
+        struct idletimer_tg *entry;
+
+        BUG_ON(!label);
+
+        list_for_each_entry(entry, &idletimer_tg_list, entry) {
+                if (!strcmp(label, entry->attr.attr.name))
+                        return entry;
+        }
+
+        return NULL;
+}
+
+static ssize_t idletimer_tg_show(struct kobject *kobj, struct attribute *attr,
+                                 char *buf)
+{
+        struct idletimer_tg *timer;
+        unsigned long expires = 0;
+
+        mutex_lock(&list_mutex);
+
+        timer = __idletimer_tg_find_by_label(attr->name);
+        if (timer)
+                expires = timer->timer.expires;
+
+        mutex_unlock(&list_mutex);
+
+        if (time_after(expires, jiffies))
+                return sprintf(buf, "%u\n",
+                               jiffies_to_msecs(expires - jiffies) / 1000);
+
+        return sprintf(buf, "0\n");
+}
+
+static void idletimer_tg_work(struct work_struct *work)
+{
+        struct idletimer_tg *timer = container_of(work, struct idletimer_tg,
+                                                  work);
+
+        sysfs_notify(idletimer_tg_kobj, NULL, timer->attr.attr.name);
+}
+
+static void idletimer_tg_expired(unsigned long data)
+{
+        struct idletimer_tg *timer = (struct idletimer_tg *) data;
+
+        pr_debug("timer %s expired\n", timer->attr.attr.name);
+
+        schedule_work(&timer->work);
+}
+
+static int idletimer_tg_create(struct idletimer_tg_info *info)
+{
+        int ret;
+
+        info->timer = kmalloc(sizeof(*info->timer), GFP_KERNEL);
+        if (!info->timer) {
+                pr_debug("couldn't alloc timer\n");
+                ret = -ENOMEM;
+                goto out;
+        }
+
+        info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL);
+        if (!info->timer->attr.attr.name) {
+                pr_debug("couldn't alloc attribute name\n");
+                ret = -ENOMEM;
+                goto out_free_timer;
+        }
+        info->timer->attr.attr.mode = S_IRUGO;
+        info->timer->attr.show = idletimer_tg_show;
+
+        ret = sysfs_create_file(idletimer_tg_kobj, &info->timer->attr.attr);
+        if (ret < 0) {
+                pr_debug("couldn't add file to sysfs\n");
+                goto out_free_attr;
+        }
+
+        list_add(&info->timer->entry, &idletimer_tg_list);
+
+        setup_timer(&info->timer->timer, idletimer_tg_expired,
+                    (unsigned long) info->timer);
+        info->timer->refcnt = 1;
+
+        mod_timer(&info->timer->timer,
+                  msecs_to_jiffies(info->timeout * 1000) + jiffies);
+
+        INIT_WORK(&info->timer->work, idletimer_tg_work);
+
+        return 0;
+
+out_free_attr:
+        kfree(info->timer->attr.attr.name);
+out_free_timer:
+        kfree(info->timer);
+out:
+        return ret;
+}
+
+/*
+ * The actual x_tables plugin.
+ */
+static unsigned int idletimer_tg_target(struct sk_buff *skb,
+                                        const struct xt_action_param *par)
+{
+        const struct idletimer_tg_info *info = par->targinfo;
+
+        pr_debug("resetting timer %s, timeout period %u\n",
+                 info->label, info->timeout);
+
+        BUG_ON(!info->timer);
+
+        mod_timer(&info->timer->timer,
+                  msecs_to_jiffies(info->timeout * 1000) + jiffies);
+
+        return XT_CONTINUE;
+}
+
+static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
+{
+        struct idletimer_tg_info *info = par->targinfo;
+        int ret;
+
+        pr_debug("checkentry targinfo %s\n", info->label);
+
+        if (info->timeout == 0) {
+                pr_debug("timeout value is zero\n");
+                return -EINVAL;
+        }
+
+        if (info->label[0] == '\0' ||
+            strnlen(info->label,
+                    MAX_IDLETIMER_LABEL_SIZE) == MAX_IDLETIMER_LABEL_SIZE) {
+                pr_debug("label is empty or not nul-terminated\n");
+                return -EINVAL;
+        }
+
+        mutex_lock(&list_mutex);
+
+        info->timer = __idletimer_tg_find_by_label(info->label);
+        if (info->timer) {
+                info->timer->refcnt++;
+                mod_timer(&info->timer->timer,
+                          msecs_to_jiffies(info->timeout * 1000) + jiffies);
+
+                pr_debug("increased refcnt of timer %s to %u\n",
+                         info->label, info->timer->refcnt);
+        } else {
+                ret = idletimer_tg_create(info);
+                if (ret < 0) {
+                        pr_debug("failed to create timer\n");
+                        mutex_unlock(&list_mutex);
+                        return ret;
+                }
+        }
+
+        mutex_unlock(&list_mutex);
+        return 0;
+}
+
+static void idletimer_tg_destroy(const struct xt_tgdtor_param *par)
+{
+        const struct idletimer_tg_info *info = par->targinfo;
+
+        pr_debug("destroy targinfo %s\n", info->label);
+
+        mutex_lock(&list_mutex);
+
+        if (--info->timer->refcnt == 0) {
+                pr_debug("deleting timer %s\n", info->label);
+
+                list_del(&info->timer->entry);
+                del_timer_sync(&info->timer->timer);
+                sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
+                kfree(info->timer->attr.attr.name);
+                kfree(info->timer);
+        } else {
+                pr_debug("decreased refcnt of timer %s to %u\n",
+                         info->label, info->timer->refcnt);
+        }
+
+        mutex_unlock(&list_mutex);
+}
+
+static struct xt_target idletimer_tg __read_mostly = {
+        .name = "IDLETIMER",
+        .family = NFPROTO_UNSPEC,
+        .target = idletimer_tg_target,
+        .targetsize = sizeof(struct idletimer_tg_info),
+        .checkentry = idletimer_tg_checkentry,
+        .destroy = idletimer_tg_destroy,
+        .me = THIS_MODULE,
+};
+
+static struct class *idletimer_tg_class;
+
+static struct device *idletimer_tg_device;
+
+static int __init idletimer_tg_init(void)
+{
+        int err;
+
+        idletimer_tg_class = class_create(THIS_MODULE, "xt_idletimer");
+        err = PTR_ERR(idletimer_tg_class);
+        if (IS_ERR(idletimer_tg_class)) {
+                pr_debug("couldn't register device class\n");
+                goto out;
+        }
+
+        idletimer_tg_device = device_create(idletimer_tg_class, NULL,
+                                            MKDEV(0, 0), NULL, "timers");
+        err = PTR_ERR(idletimer_tg_device);
+        if (IS_ERR(idletimer_tg_device)) {
+                pr_debug("couldn't register system device\n");
+                goto out_class;
+        }
+
+        idletimer_tg_kobj = &idletimer_tg_device->kobj;
+
+        err = xt_register_target(&idletimer_tg);
+        if (err < 0) {
+                pr_debug("couldn't register xt target\n");
+                goto out_dev;
+        }
+
+        return 0;
+out_dev:
+        device_destroy(idletimer_tg_class, MKDEV(0, 0));
+out_class:
+        class_destroy(idletimer_tg_class);
+out:
+        return err;
+}
+
+static void __exit idletimer_tg_exit(void)
+{
+        xt_unregister_target(&idletimer_tg);
+
+        device_destroy(idletimer_tg_class, MKDEV(0, 0));
+        class_destroy(idletimer_tg_class);
+}
+
+module_init(idletimer_tg_init);
+module_exit(idletimer_tg_exit);
+
+MODULE_AUTHOR("Timo Teras <ext-timo.teras@nokia.com>");
+MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
+MODULE_DESCRIPTION("Xtables: idle time monitor");
+MODULE_LICENSE("GPL v2");
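Editor's note on using the new target: every matching packet resets the per-label timer via mod_timer(), each label becomes a read-only sysfs file under the "timers" device created in idletimer_tg_init(), reading the file returns the seconds left until expiry ("0" once idle), and sysfs_notify() from the work item wakes anyone poll()ing the file. A hypothetical usage sketch, assuming a rule such as `iptables -t mangle -A PREROUTING -i wlan0 -j IDLETIMER --timeout 60 --label wlan_idle` (option names per the matching iptables userspace extension) and the default sysfs mount:

    /* Sketch: block until the "wlan_idle" timer expires, then report.
     * The path assumes the class/device names used in this module. */
    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[16];
            struct pollfd pfd;
            ssize_t n;
            int fd = open("/sys/class/xt_idletimer/timers/wlan_idle",
                          O_RDONLY);

            if (fd < 0)
                    return 1;

            read(fd, buf, sizeof(buf));     /* arm sysfs poll by reading once */
            pfd.fd = fd;
            pfd.events = POLLPRI;
            poll(&pfd, 1, -1);              /* sysfs_notify() wakes us here */

            lseek(fd, 0, SEEK_SET);
            n = read(fd, buf, sizeof(buf) - 1);
            if (n > 0) {
                    buf[n] = '\0';
                    printf("seconds left: %s", buf);   /* "0" => idle */
            }
            close(fd);
            return 0;
    }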
diff --git a/net/netfilter/xt_NOTRACK.c b/net/netfilter/xt_NOTRACK.c
index 512b9123252f..9d782181b6c8 100644
--- a/net/netfilter/xt_NOTRACK.c
+++ b/net/netfilter/xt_NOTRACK.c
@@ -23,7 +23,7 @@ notrack_tg(struct sk_buff *skb, const struct xt_action_param *par)
            If there is a real ct entry corresponding to this packet,
            it'll hang around till timing out. We don't deal with it
            for performance reasons. JK */
-        skb->nfct = &nf_conntrack_untracked.ct_general;
+        skb->nfct = &nf_ct_untracked_get()->ct_general;
         skb->nfctinfo = IP_CT_NEW;
         nf_conntrack_get(skb->nfct);
 
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index c77a85bbd9eb..22a2d421e7eb 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -104,7 +104,7 @@ tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
 #ifdef WITH_CONNTRACK
         /* Avoid counting cloned packets towards the original connection. */
         nf_conntrack_put(skb->nfct);
-        skb->nfct = &nf_conntrack_untracked.ct_general;
+        skb->nfct = &nf_ct_untracked_get()->ct_general;
         skb->nfctinfo = IP_CT_NEW;
         nf_conntrack_get(skb->nfct);
 #endif
@@ -177,7 +177,7 @@ tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 
 #ifdef WITH_CONNTRACK
         nf_conntrack_put(skb->nfct);
-        skb->nfct = &nf_conntrack_untracked.ct_general;
+        skb->nfct = &nf_ct_untracked_get()->ct_general;
         skb->nfctinfo = IP_CT_NEW;
         nf_conntrack_get(skb->nfct);
 #endif
diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c
index 30b95a1c1c89..f4af1bfafb1c 100644
--- a/net/netfilter/xt_cluster.c
+++ b/net/netfilter/xt_cluster.c
@@ -120,7 +120,7 @@ xt_cluster_mt(const struct sk_buff *skb, struct xt_action_param *par)
         if (ct == NULL)
                 return false;
 
-        if (ct == &nf_conntrack_untracked)
+        if (nf_ct_is_untracked(ct))
                 return false;
 
         if (ct->master)
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c
index 39681f10291c..e536710ad916 100644
--- a/net/netfilter/xt_conntrack.c
+++ b/net/netfilter/xt_conntrack.c
@@ -123,11 +123,12 @@ conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par,
 
         ct = nf_ct_get(skb, &ctinfo);
 
-        if (ct == &nf_conntrack_untracked)
-                statebit = XT_CONNTRACK_STATE_UNTRACKED;
-        else if (ct != NULL)
-                statebit = XT_CONNTRACK_STATE_BIT(ctinfo);
-        else
+        if (ct) {
+                if (nf_ct_is_untracked(ct))
+                        statebit = XT_CONNTRACK_STATE_UNTRACKED;
+                else
+                        statebit = XT_CONNTRACK_STATE_BIT(ctinfo);
+        } else
                 statebit = XT_CONNTRACK_STATE_INVALID;
 
         if (info->match_flags & XT_CONNTRACK_STATE) {
diff --git a/net/netfilter/xt_sctp.c b/net/netfilter/xt_sctp.c
index c04fcf385c59..ef36a56a02c6 100644
--- a/net/netfilter/xt_sctp.c
+++ b/net/netfilter/xt_sctp.c
@@ -3,6 +3,7 @@
 #include <linux/skbuff.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
+#include <net/sctp/sctp.h>
 #include <linux/sctp.h>
 
 #include <linux/netfilter/x_tables.h>
@@ -67,7 +68,7 @@ match_packet(const struct sk_buff *skb,
                          ++i, offset, sch->type, htons(sch->length),
                          sch->flags);
 #endif
-                offset += (ntohs(sch->length) + 3) & ~3;
+                offset += WORD_ROUND(ntohs(sch->length));
 
                 pr_debug("skb->len: %d\toffset: %d\n", skb->len, offset);
 
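Editor's note: at this point in the tree, <net/sctp/sctp.h> defines WORD_ROUND(s) as (((s) + 3) & ~3), so the replacement is a pure readability change: both forms round an SCTP chunk length up to the next 4-byte boundary, the step size the chunk walk requires. A self-contained userspace check of that property (sketch):

    /* Sketch: verify WORD_ROUND rounds every 16-bit chunk length up to
     * the next multiple of four, matching the removed expression. */
    #include <assert.h>

    #define WORD_ROUND(s) (((s) + 3) & ~3)   /* as in <net/sctp/sctp.h> */

    int main(void)
    {
            unsigned int s;

            for (s = 0; s <= 0xffff; s++) {
                    unsigned int r = WORD_ROUND(s);
                    assert(r % 4 == 0 && r >= s && r < s + 4);
            }
            return 0;
    }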
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 3d54c236a1ba..1ca89908cbad 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -127,7 +127,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
          * reply packet of an established SNAT-ted connection. */
 
         ct = nf_ct_get(skb, &ctinfo);
-        if (ct && (ct != &nf_conntrack_untracked) &&
+        if (ct && !nf_ct_is_untracked(ct) &&
             ((iph->protocol != IPPROTO_ICMP &&
               ctinfo == IP_CT_IS_REPLY + IP_CT_ESTABLISHED) ||
              (iph->protocol == IPPROTO_ICMP &&
diff --git a/net/netfilter/xt_state.c b/net/netfilter/xt_state.c
index e12e053d3782..a507922d80cd 100644
--- a/net/netfilter/xt_state.c
+++ b/net/netfilter/xt_state.c
@@ -26,14 +26,16 @@ state_mt(const struct sk_buff *skb, struct xt_action_param *par)
         const struct xt_state_info *sinfo = par->matchinfo;
         enum ip_conntrack_info ctinfo;
         unsigned int statebit;
+        struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 
-        if (nf_ct_is_untracked(skb))
-                statebit = XT_STATE_UNTRACKED;
-        else if (!nf_ct_get(skb, &ctinfo))
+        if (!ct)
                 statebit = XT_STATE_INVALID;
-        else
-                statebit = XT_STATE_BIT(ctinfo);
-
+        else {
+                if (nf_ct_is_untracked(ct))
+                        statebit = XT_STATE_UNTRACKED;
+                else
+                        statebit = XT_STATE_BIT(ctinfo);
+        }
         return (sinfo->statemask & statebit);
 }
 
diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
index 96e62b8fd6b1..42ecb71d445f 100644
--- a/net/netfilter/xt_statistic.c
+++ b/net/netfilter/xt_statistic.c
@@ -18,8 +18,8 @@
 #include <linux/netfilter/x_tables.h>
 
 struct xt_statistic_priv {
-        uint32_t count;
-};
+        atomic_t count;
+} ____cacheline_aligned_in_smp;
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
@@ -27,13 +27,12 @@ MODULE_DESCRIPTION("Xtables: statistics-based matching (\"Nth\", random)");
 MODULE_ALIAS("ipt_statistic");
 MODULE_ALIAS("ip6t_statistic");
 
-static DEFINE_SPINLOCK(nth_lock);
-
 static bool
 statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
         const struct xt_statistic_info *info = par->matchinfo;
         bool ret = info->flags & XT_STATISTIC_INVERT;
+        int nval, oval;
 
         switch (info->mode) {
         case XT_STATISTIC_MODE_RANDOM:
@@ -41,12 +40,12 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
                         ret = !ret;
                 break;
         case XT_STATISTIC_MODE_NTH:
-                spin_lock_bh(&nth_lock);
-                if (info->master->count++ == info->u.nth.every) {
-                        info->master->count = 0;
+                do {
+                        oval = atomic_read(&info->master->count);
+                        nval = (oval == info->u.nth.every) ? 0 : oval + 1;
+                } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
+                if (nval == 0)
                         ret = !ret;
-                }
-                spin_unlock_bh(&nth_lock);
                 break;
         }
 
@@ -64,7 +63,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
         info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
         if (info->master == NULL)
                 return -ENOMEM;
-        info->master->count = info->u.nth.count;
+        atomic_set(&info->master->count, info->u.nth.count);
 
         return 0;
 }
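Editor's note: the nth mode now advances its counter with a read/compute/compare-and-swap retry loop instead of a global spinlock; exactly one CPU observes the wrap to zero in each cycle of every+1 packets, so exactly one packet per cycle matches, preserving the old semantics without serializing all CPUs. A C11 userspace analogue of the same pattern (sketch; the kernel code above uses atomic_t and atomic_cmpxchg()):

    /* Sketch: lock-free bounded counter. Returns true for exactly one
     * caller per cycle of (every + 1) increments, even under races. */
    #include <stdatomic.h>
    #include <stdbool.h>

    static _Atomic unsigned int count;

    static bool every_nth(unsigned int every)
    {
            unsigned int oval, nval;

            do {
                    oval = atomic_load(&count);
                    nval = (oval == every) ? 0 : oval + 1;
            } while (!atomic_compare_exchange_weak(&count, &oval, nval));

            return nval == 0;       /* the caller that wrapped it "matches" */
    }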