commit 16fb62b6b4d57339a0ec931b3fb8c8d0ca6414e8
Merge: a3433f35a55f7604742cae620c6dc6edfc70db6a f9181f4ffc71d7b7dd1906c9a11d51d6659220ae
Author:    David S. Miller <davem@davemloft.net>  2010-06-15 16:49:24 -0400
Committer: David S. Miller <davem@davemloft.net>  2010-06-15 16:49:24 -0400
Tree:      a1041342f31a626baf3a08d09d5c81a65dd8ef28 /net/ipv4

    Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-next-2.6
Diffstat (limited to 'net/ipv4')

 net/ipv4/netfilter.c                   |  4
 net/ipv4/netfilter/arp_tables.c        |  7
 net/ipv4/netfilter/ip_queue.c          | 57
 net/ipv4/netfilter/ip_tables.c         |  4
 net/ipv4/netfilter/ipt_CLUSTERIP.c     | 48
 net/ipv4/netfilter/nf_nat_core.c       |  2
 net/ipv4/netfilter/nf_nat_standalone.c |  2
 7 files changed, 62 insertions(+), 62 deletions(-)
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index cfbc79af21c3..d88a46c54fd1 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -212,9 +212,7 @@ static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
 		skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, protocol,
 					       skb->len - dataoff, 0);
 		skb->ip_summed = CHECKSUM_NONE;
-		csum = __skb_checksum_complete_head(skb, dataoff + len);
-		if (!csum)
-			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		return __skb_checksum_complete_head(skb, dataoff + len);
 	}
 	return csum;
 }
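
Note: the netfilter.c hunk stops caching a partial-checksum verdict. A sum computed over only dataoff + len bytes must not mark the whole skb CHECKSUM_UNNECESSARY, so nf_ip_checksum_partial() now simply returns what __skb_checksum_complete_head() reports. The standalone sketch below (ordinary userspace C, not kernel code; all names are illustrative) shows the one's-complement arithmetic behind these helpers and why a partial sum vouches for nothing beyond the bytes it covered.

/* Illustrative userspace model of the Internet checksum used by
 * csum_partial()/csum_fold() in the kernel.  A checksum folded over
 * only the first 'len' bytes says nothing about the rest of the
 * packet, which is why the hunk above no longer promotes a partial
 * verification to CHECKSUM_UNNECESSARY.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t csum_fold(uint32_t sum)
{
	/* Fold the 32-bit accumulator into 16 bits, then complement. */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

static uint32_t csum_partial(const uint8_t *buf, size_t len, uint32_t sum)
{
	size_t i;

	/* Sum 16-bit words; an odd trailing byte is padded with zero. */
	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)buf[i] << 8 | buf[i + 1];
	if (len & 1)
		sum += (uint32_t)buf[len - 1] << 8;
	return sum;
}

int main(void)
{
	uint8_t pkt[] = { 0x45, 0x00, 0x00, 0x54, 0xde, 0xad, 0xbe, 0xef };

	/* The checksum over the first 4 bytes differs from the full one. */
	printf("partial: 0x%04x\n", csum_fold(csum_partial(pkt, 4, 0)));
	printf("full:    0x%04x\n", csum_fold(csum_partial(pkt, sizeof(pkt), 0)));
	return 0;
}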
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 1ac01b128621..16c0ba0a2728 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -758,7 +758,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
 	 * about).
 	 */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc_node(countersize, numa_node_id());
+	counters = vmalloc(countersize);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1005,8 +1005,7 @@ static int __do_replace(struct net *net, const char *name,
 	struct arpt_entry *iter;
 
 	ret = 0;
-	counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
-				numa_node_id());
+	counters = vmalloc(num_counters * sizeof(struct xt_counters));
 	if (!counters) {
 		ret = -ENOMEM;
 		goto out;
@@ -1159,7 +1158,7 @@ static int do_add_counters(struct net *net, const void __user *user,
 	if (len != size + num_counters * sizeof(struct xt_counters))
 		return -EINVAL;
 
-	paddc = vmalloc_node(len - size, numa_node_id());
+	paddc = vmalloc(len - size);
 	if (!paddc)
 		return -ENOMEM;
 
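
Note: all three arp_tables.c hunks (and the two matching ip_tables.c hunks further down) drop the NUMA placement hint. The counter arrays are written and read from every CPU, so vmalloc_node(..., numa_node_id()) gained nothing over plain vmalloc() while tying placement to whichever node the calling task happened to run on. A minimal kernel-style sketch of the resulting pattern, mirroring alloc_counters() above rather than reproducing it (the function name is hypothetical):

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/netfilter/x_tables.h>

/* Sketch of the post-merge allocation pattern; not verbatim code. */
static struct xt_counters *alloc_counters_sketch(unsigned int number)
{
	/* Plain vmalloc(): the counters are touched from all CPUs, so a
	 * node-local placement hint would not help here.
	 */
	struct xt_counters *counters = vmalloc(sizeof(*counters) * number);

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);
	return counters;
}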
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index a4e5fc5df4bf..d2c1311cb28d 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -42,7 +42,7 @@ typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long);
 
 static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
 static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
-static DEFINE_RWLOCK(queue_lock);
+static DEFINE_SPINLOCK(queue_lock);
 static int peer_pid __read_mostly;
 static unsigned int copy_range __read_mostly;
 static unsigned int queue_total;
@@ -72,10 +72,10 @@ __ipq_set_mode(unsigned char mode, unsigned int range)
 		break;
 
 	case IPQ_COPY_PACKET:
-		copy_mode = mode;
+		if (range > 0xFFFF)
+			range = 0xFFFF;
 		copy_range = range;
-		if (copy_range > 0xFFFF)
-			copy_range = 0xFFFF;
+		copy_mode = mode;
 		break;
 
 	default:
@@ -101,7 +101,7 @@ ipq_find_dequeue_entry(unsigned long id)
 {
 	struct nf_queue_entry *entry = NULL, *i;
 
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 
 	list_for_each_entry(i, &queue_list, list) {
 		if ((unsigned long)i == id) {
@@ -115,7 +115,7 @@ ipq_find_dequeue_entry(unsigned long id)
 		queue_total--;
 	}
 
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return entry;
 }
 
@@ -136,9 +136,9 @@ __ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
 static void
 ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
 {
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 	__ipq_flush(cmpfn, data);
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 }
 
 static struct sk_buff *
@@ -152,9 +152,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
 	struct nlmsghdr *nlh;
 	struct timeval tv;
 
-	read_lock_bh(&queue_lock);
-
-	switch (copy_mode) {
+	switch (ACCESS_ONCE(copy_mode)) {
 	case IPQ_COPY_META:
 	case IPQ_COPY_NONE:
 		size = NLMSG_SPACE(sizeof(*pmsg));
@@ -162,26 +160,21 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
 
 	case IPQ_COPY_PACKET:
 		if (entry->skb->ip_summed == CHECKSUM_PARTIAL &&
-		    (*errp = skb_checksum_help(entry->skb))) {
-			read_unlock_bh(&queue_lock);
+		    (*errp = skb_checksum_help(entry->skb)))
 			return NULL;
-		}
-		if (copy_range == 0 || copy_range > entry->skb->len)
+
+		data_len = ACCESS_ONCE(copy_range);
+		if (data_len == 0 || data_len > entry->skb->len)
 			data_len = entry->skb->len;
-		else
-			data_len = copy_range;
 
 		size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
 		break;
 
 	default:
 		*errp = -EINVAL;
-		read_unlock_bh(&queue_lock);
 		return NULL;
 	}
 
-	read_unlock_bh(&queue_lock);
-
 	skb = alloc_skb(size, GFP_ATOMIC);
 	if (!skb)
 		goto nlmsg_failure;
@@ -242,7 +235,7 @@ ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 	if (nskb == NULL)
 		return status;
 
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 
 	if (!peer_pid)
 		goto err_out_free_nskb;
@@ -266,14 +259,14 @@ ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 
 	__ipq_enqueue_entry(entry);
 
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return status;
 
 err_out_free_nskb:
 	kfree_skb(nskb);
 
 err_out_unlock:
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return status;
 }
 
@@ -342,9 +335,9 @@ ipq_set_mode(unsigned char mode, unsigned int range)
 {
 	int status;
 
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 	status = __ipq_set_mode(mode, range);
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return status;
 }
 
@@ -440,11 +433,11 @@ __ipq_rcv_skb(struct sk_buff *skb)
 	if (security_netlink_recv(skb, CAP_NET_ADMIN))
 		RCV_SKB_FAIL(-EPERM);
 
-	write_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 
 	if (peer_pid) {
 		if (peer_pid != pid) {
-			write_unlock_bh(&queue_lock);
+			spin_unlock_bh(&queue_lock);
 			RCV_SKB_FAIL(-EBUSY);
 		}
 	} else {
@@ -452,7 +445,7 @@ __ipq_rcv_skb(struct sk_buff *skb)
 		peer_pid = pid;
 	}
 
-	write_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 
 	status = ipq_receive_peer(NLMSG_DATA(nlh), type,
 				  nlmsglen - NLMSG_LENGTH(0));
@@ -497,10 +490,10 @@ ipq_rcv_nl_event(struct notifier_block *this,
 	struct netlink_notify *n = ptr;
 
 	if (event == NETLINK_URELEASE && n->protocol == NETLINK_FIREWALL) {
-		write_lock_bh(&queue_lock);
+		spin_lock_bh(&queue_lock);
 		if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid))
 			__ipq_reset();
-		write_unlock_bh(&queue_lock);
+		spin_unlock_bh(&queue_lock);
 	}
 	return NOTIFY_DONE;
 }
@@ -527,7 +520,7 @@ static ctl_table ipq_table[] = {
 #ifdef CONFIG_PROC_FS
 static int ip_queue_show(struct seq_file *m, void *v)
 {
-	read_lock_bh(&queue_lock);
+	spin_lock_bh(&queue_lock);
 
 	seq_printf(m,
 		   "Peer PID : %d\n"
@@ -545,7 +538,7 @@ static int ip_queue_show(struct seq_file *m, void *v)
 		   queue_dropped,
 		   queue_user_dropped);
 
-	read_unlock_bh(&queue_lock);
+	spin_unlock_bh(&queue_lock);
 	return 0;
 }
 
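
Note: the ip_queue.c changes are one logical conversion. queue_lock becomes a plain spinlock because nearly every path took the rwlock for writing anyway, and the single read-mostly path, ipq_build_packet_message(), drops the lock entirely by snapshotting copy_mode and copy_range with ACCESS_ONCE(). The snapshot is safe because __ipq_set_mode() now clamps the range before storing it, so any value a lockless reader observes is already bounded. A kernel-style sketch of that idiom, with hypothetical names:

#include <linux/compiler.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(cfg_lock);
static unsigned char copy_mode_sketch;
static unsigned int copy_range_sketch;

/* Writer: clamp before store, so no reader can ever observe an
 * out-of-range value, locked or not.
 */
static void set_mode_sketch(unsigned char mode, unsigned int range)
{
	spin_lock_bh(&cfg_lock);
	if (range > 0xFFFF)
		range = 0xFFFF;
	copy_range_sketch = range;
	copy_mode_sketch = mode;
	spin_unlock_bh(&cfg_lock);
}

/* Lockless reader: ACCESS_ONCE() forces a single load, so the value
 * tested and the value used cannot diverge if a writer races with us.
 */
static unsigned int snapshot_len_sketch(unsigned int pkt_len)
{
	unsigned int data_len = ACCESS_ONCE(copy_range_sketch);

	if (data_len == 0 || data_len > pkt_len)
		data_len = pkt_len;
	return data_len;
}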
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 4b6c5ca610fc..b38c11810c65 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -928,7 +928,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
 	   (other than comefrom, which userspace doesn't care
 	   about). */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc_node(countersize, numa_node_id());
+	counters = vmalloc(countersize);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1352,7 +1352,7 @@ do_add_counters(struct net *net, const void __user *user,
 	if (len != size + num_counters * sizeof(struct xt_counters))
 		return -EINVAL;
 
-	paddc = vmalloc_node(len - size, numa_node_id());
+	paddc = vmalloc(len - size);
 	if (!paddc)
 		return -ENOMEM;
 
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index f91c94b9a790..64d0875f5192 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -53,12 +53,13 @@ struct clusterip_config {
 #endif
 	enum clusterip_hashmode hash_mode;	/* which hashing mode */
 	u_int32_t hash_initval;			/* hash initialization */
+	struct rcu_head rcu;
 };
 
 static LIST_HEAD(clusterip_configs);
 
 /* clusterip_lock protects the clusterip_configs list */
-static DEFINE_RWLOCK(clusterip_lock);
+static DEFINE_SPINLOCK(clusterip_lock);
 
 #ifdef CONFIG_PROC_FS
 static const struct file_operations clusterip_proc_fops;
@@ -71,11 +72,17 @@ clusterip_config_get(struct clusterip_config *c)
 	atomic_inc(&c->refcount);
 }
 
+
+static void clusterip_config_rcu_free(struct rcu_head *head)
+{
+	kfree(container_of(head, struct clusterip_config, rcu));
+}
+
 static inline void
 clusterip_config_put(struct clusterip_config *c)
 {
 	if (atomic_dec_and_test(&c->refcount))
-		kfree(c);
+		call_rcu_bh(&c->rcu, clusterip_config_rcu_free);
 }
 
 /* decrease the count of entries using/referencing this config. If last
@@ -84,10 +91,11 @@ clusterip_config_put(struct clusterip_config *c)
 static inline void
 clusterip_config_entry_put(struct clusterip_config *c)
 {
-	write_lock_bh(&clusterip_lock);
-	if (atomic_dec_and_test(&c->entries)) {
-		list_del(&c->list);
-		write_unlock_bh(&clusterip_lock);
+	local_bh_disable();
+	if (atomic_dec_and_lock(&c->entries, &clusterip_lock)) {
+		list_del_rcu(&c->list);
+		spin_unlock(&clusterip_lock);
+		local_bh_enable();
 
 		dev_mc_del(c->dev, c->clustermac);
 		dev_put(c->dev);
@@ -100,7 +108,7 @@ clusterip_config_entry_put(struct clusterip_config *c)
 #endif
 		return;
 	}
-	write_unlock_bh(&clusterip_lock);
+	local_bh_enable();
 }
 
 static struct clusterip_config *
@@ -108,7 +116,7 @@ __clusterip_config_find(__be32 clusterip)
 {
 	struct clusterip_config *c;
 
-	list_for_each_entry(c, &clusterip_configs, list) {
+	list_for_each_entry_rcu(c, &clusterip_configs, list) {
 		if (c->clusterip == clusterip)
 			return c;
 	}
@@ -121,16 +129,15 @@ clusterip_config_find_get(__be32 clusterip, int entry)
 {
 	struct clusterip_config *c;
 
-	read_lock_bh(&clusterip_lock);
+	rcu_read_lock_bh();
 	c = __clusterip_config_find(clusterip);
-	if (!c) {
-		read_unlock_bh(&clusterip_lock);
-		return NULL;
+	if (c) {
+		if (unlikely(!atomic_inc_not_zero(&c->refcount)))
+			c = NULL;
+		else if (entry)
+			atomic_inc(&c->entries);
 	}
-	atomic_inc(&c->refcount);
-	if (entry)
-		atomic_inc(&c->entries);
-	read_unlock_bh(&clusterip_lock);
+	rcu_read_unlock_bh();
 
 	return c;
 }
@@ -181,9 +188,9 @@ clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
 	}
 #endif
 
-	write_lock_bh(&clusterip_lock);
-	list_add(&c->list, &clusterip_configs);
-	write_unlock_bh(&clusterip_lock);
+	spin_lock_bh(&clusterip_lock);
+	list_add_rcu(&c->list, &clusterip_configs);
+	spin_unlock_bh(&clusterip_lock);
 
 	return c;
 }
@@ -733,6 +740,9 @@ static void __exit clusterip_tg_exit(void)
 #endif
 	nf_unregister_hook(&cip_arp_ops);
 	xt_unregister_target(&clusterip_tg_reg);
+
+	/* Wait for completion of call_rcu_bh()'s (clusterip_config_rcu_free) */
+	rcu_barrier_bh();
 }
 
 module_init(clusterip_tg_init);
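
Note: the ipt_CLUSTERIP.c hunks are a textbook rwlock-to-RCU conversion of the clusterip_configs list. Lookups run under rcu_read_lock_bh() and may only take a reference via atomic_inc_not_zero(), so an object whose refcount has already hit zero is skipped rather than resurrected; unlinking uses list_del_rcu() with the kfree() deferred through call_rcu_bh(); and module unload finishes with rcu_barrier_bh() so no pending callback can run after the module text is gone. A condensed kernel-style sketch of the whole pattern, with hypothetical names and only the idiom-carrying pieces:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct cfg {
	struct list_head list;
	atomic_t refcount;
	struct rcu_head rcu;
	__be32 clusterip;
};

static LIST_HEAD(cfg_list);
static DEFINE_SPINLOCK(cfg_lock);	/* serializes writers only */

static void cfg_rcu_free(struct rcu_head *head)
{
	kfree(container_of(head, struct cfg, rcu));
}

static struct cfg *cfg_find_get(__be32 clusterip)
{
	struct cfg *c, *found = NULL;

	rcu_read_lock_bh();
	list_for_each_entry_rcu(c, &cfg_list, list) {
		if (c->clusterip != clusterip)
			continue;
		/* Take a reference only if the object is still live;
		 * never resurrect one whose count already hit zero.
		 */
		if (atomic_inc_not_zero(&c->refcount))
			found = c;
		break;
	}
	rcu_read_unlock_bh();
	return found;
}

static void cfg_put(struct cfg *c)
{
	local_bh_disable();
	/* atomic_dec_and_lock() takes cfg_lock only when the count
	 * drops to zero, closing the race against a concurrent
	 * atomic_inc_not_zero() in cfg_find_get().
	 */
	if (atomic_dec_and_lock(&c->refcount, &cfg_lock)) {
		list_del_rcu(&c->list);
		spin_unlock(&cfg_lock);
		local_bh_enable();
		/* A reader may still hold the pointer: defer the kfree()
		 * past a grace period.  Module exit must then finish
		 * with rcu_barrier_bh() so no callback outlives it.
		 */
		call_rcu_bh(&c->rcu, cfg_rcu_free);
		return;
	}
	local_bh_enable();
}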
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 4f8bddb760c9..c7719b283ada 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -742,7 +742,7 @@ static int __init nf_nat_init(void)
 	spin_unlock_bh(&nf_nat_lock);
 
 	/* Initialize fake conntrack so that NAT will skip it */
-	nf_conntrack_untracked.status |= IPS_NAT_DONE_MASK;
+	nf_ct_untracked_status_or(IPS_NAT_DONE_MASK);
 
 	l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);
 
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
index beb25819c9c9..6723c682250d 100644
--- a/net/ipv4/netfilter/nf_nat_standalone.c
+++ b/net/ipv4/netfilter/nf_nat_standalone.c
@@ -98,7 +98,7 @@ nf_nat_fn(unsigned int hooknum,
 		return NF_ACCEPT;
 
 	/* Don't try to NAT if this packet is not conntracked */
-	if (ct == &nf_conntrack_untracked)
+	if (nf_ct_is_untracked(ct))
 		return NF_ACCEPT;
 
 	nat = nfct_nat(ct);
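
Note: both NAT hunks track an nf-next API change that this merge pulls in. Code stops touching the nf_conntrack_untracked global directly: nf_ct_untracked_status_or() ORs status bits into the untracked conntrack state, and nf_ct_is_untracked() tests a status bit on the conntrack itself instead of comparing pointers against one global object. The condensed definitions below are hypothetical stand-ins to show the shape of the test; the real ones live in the netfilter headers:

#include <linux/types.h>

/* Hypothetical, condensed stand-ins for the real definitions in
 * include/net/netfilter/nf_conntrack.h and
 * include/linux/netfilter/nf_conntrack_common.h.
 */
enum { IPS_UNTRACKED_BIT_SKETCH = 12 };
#define IPS_UNTRACKED_SKETCH	(1 << IPS_UNTRACKED_BIT_SKETCH)

struct nf_conn_sketch {
	unsigned long status;
};

static inline bool nf_ct_is_untracked_sketch(const struct nf_conn_sketch *ct)
{
	/* The untracked state travels with the conntrack entry itself,
	 * so the test no longer depends on there being exactly one
	 * untracked object to compare pointers against.
	 */
	return ct->status & IPS_UNTRACKED_SKETCH;
}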